Compare commits

...

19 Commits
1.6.1 ... 1.6.2

Author SHA1 Message Date
Daniel Nelson
1fb4283f31 Telegraf 1.6.2 2018-05-08 12:47:33 -07:00
Daniel Nelson
3187d58d92 Update changelog
(cherry picked from commit 2a2cc3212f)
2018-05-08 12:47:16 -07:00
Daniel Nelson
a8d33a26fb Add uint/bool support to cratedb output (#4117)
(cherry picked from commit b11468757c)
2018-05-08 12:45:01 -07:00
Daniel Nelson
de3b60c2b7 Update changelog
(cherry picked from commit 4c35a56edd)
2018-05-04 18:31:58 -07:00
Daniel Nelson
469ba00e7d Only lowercase mysql slave metrics with metric_version = 2
(cherry picked from commit 8b687a8e21)
2018-05-04 18:29:43 -07:00
Daniel Nelson
7e25f98eb5 Update kafka readme
(cherry picked from commit b2bb44363a)
2018-05-04 14:40:10 -07:00
Daniel Nelson
f5e9ec8fc1 Fix grammar
(cherry picked from commit fd63591b15)
2018-05-04 14:40:10 -07:00
Daniel Nelson
89d0d8455b Clarify max_retry option in kafka output
(cherry picked from commit 2108582b43)
2018-05-04 14:40:10 -07:00
Nicolas Steinmetz
81446d8cf3 Fix name_override example in mysql readme (#4100)
(cherry picked from commit 81620c69c5)
2018-05-04 14:20:59 -07:00
Daniel Nelson
7372e62083 Update gopsutil
New version removes accidentally added code under the GPL license.
2018-05-03 12:35:48 -07:00
Daniel Nelson
6705b2bd94 Update changelog
(cherry picked from commit 2fb3f7a585)
2018-05-03 11:41:49 -07:00
Daniel Meiners
f21f31a08a Ignore UTF8 BOM in JSON parser (#4099)
(cherry picked from commit 9647ea88ea)
2018-05-03 11:41:49 -07:00
Daniel Nelson
ae9e77dab8 Update telegraf.conf 2018-05-02 11:51:36 -07:00
Daniel Nelson
c7a6d8e9a2 Remove dead link from logparser sample config
(cherry picked from commit 239333ad90)
2018-05-02 11:50:48 -07:00
Daniel Nelson
ad6f40c16c Update changelog
(cherry picked from commit fd64487be5)
2018-05-01 18:57:45 -07:00
Daniel Nelson
b7e63ac6e8 Fix handling of uint64 in datadog output (#4091)
(cherry picked from commit cff7ee8edf)
2018-05-01 18:57:45 -07:00
Daniel Nelson
91352a29bb Update changelog
(cherry picked from commit 908170b207)
2018-04-27 14:57:08 -07:00
Vincent Caron
d45b64d7a0 Use same timestamp for fields in system input (#4078)
(cherry picked from commit ec47cab950)
2018-04-27 14:56:53 -07:00
Daniel Nelson
d496ff16bf Cleanup changelog 2018-04-23 15:15:40 -07:00
16 changed files with 121 additions and 77 deletions

View File

@@ -1,18 +1,12 @@
-## v1.6.1 [unreleased]
+## v1.7 [unreleased]
+
+## v1.6.2 [2018-05-08]
 
-### New Inputs
+### Bugfixes
 
-- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
-- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
-
-### Features
-
-- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
-- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
-- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
-- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
-- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
+- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
+- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
+- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
+- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
+- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.
 ## v1.6.1 [2018-04-23]

Godeps
View File

@@ -66,7 +66,7 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
 github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
 github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
-github.com/shirou/gopsutil a5c2888e464b14fa882c2a059e0f95716bd45cf1
+github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
 github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
 github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d

View File

@@ -79,15 +79,15 @@ services:
       - "389:389"
       - "636:636"
   crate:
-    image: crate/crate
-    ports:
-      - "4200:4200"
-      - "4230:4230"
-    command:
-      - crate
-      - -Cnetwork.host=0.0.0.0
-      - -Ctransport.host=localhost
-      - -Clicense.enterprise=false
-    environment:
-      - CRATE_HEAP_SIZE=128m
-      - JAVA_OPTS='-Xms256m -Xmx256m'
+    image: crate/crate
+    ports:
+      - "4200:4200"
+      - "4230:4230"
+      - "5432:5432"
+    command:
+      - crate
+      - -Cnetwork.host=0.0.0.0
+      - -Ctransport.host=localhost
+      - -Clicense.enterprise=false
+    environment:
+      - CRATE_HEAP_SIZE=128m
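The new 5432 mapping exposes CrateDB's PostgreSQL wire-protocol port, which is how the cratedb output reaches the server (note the `_ "github.com/jackc/pgx/stdlib"` import in the cratedb diff further down). A rough sketch of that connection path, assuming a local CrateDB started from this compose file and its default `crate` user:

```go
package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/jackc/pgx/stdlib" // registers the "pgx" driver the cratedb output uses
)

func main() {
    // CrateDB speaks the PostgreSQL wire protocol on 5432; the compose
    // change above maps that port to the host.
    db, err := sql.Open("pgx", "postgres://crate@localhost:5432/doc")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    var one int
    if err := db.QueryRow("SELECT 1").Scan(&one); err != nil {
        log.Fatal(err)
    }
    fmt.Println("connected, got:", one)
}
```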

View File

@@ -3017,7 +3017,6 @@
 # #   watch_method = "inotify"
 #
 # ## Parse logstash-style "grok" patterns:
-# ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
 # [inputs.logparser.grok]
 #   ## This is a list of patterns to check the given log file(s) for.
 #   ## Note that adding patterns here increases processing time. The most

View File

@@ -70,7 +70,6 @@ const sampleConfig = `
   #   watch_method = "inotify"
 
   ## Parse logstash-style "grok" patterns:
-  ##   Telegraf built-in parsing patterns: https://goo.gl/dkay10
   [inputs.logparser.grok]
     ## This is a list of patterns to check the given log file(s) for.
     ## Note that adding patterns here increases processing time. The most

View File

@@ -114,7 +114,7 @@ style concurrently:
   servers = ["tcp(127.0.0.1:3306)/"]
 
 [[inputs.mysql]]
-  name_override = "_2"
+  name_suffix = "_v2"
   metric_version = 2
 
   servers = ["tcp(127.0.0.1:3306)/"]
@@ -141,7 +141,7 @@ measurement name.
   metric_version = 2
 
 [[inputs.mysql]]
-  name_override = "_2"
+  name_suffix = "_v2"
   metric_version = 2
   servers = ["tcp(127.0.0.1:3306)/"]

View File

@@ -608,7 +608,9 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
     }
     // range over columns, and try to parse values
     for i, col := range cols {
-        col = strings.ToLower(col)
+        if m.MetricVersion >= 2 {
+            col = strings.ToLower(col)
+        }
         if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok {
             fields["slave_"+col] = value
         }
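Lowercasing is now applied only when `metric_version = 2`, so v1 field keys keep their original case. A small standalone sketch of the effect on field names (the column names are hypothetical examples, not from this diff):

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Example slave-status columns as MySQL reports them.
    cols := []string{"Slave_IO_Running", "Seconds_Behind_Master"}
    for _, metricVersion := range []int{1, 2} {
        for _, col := range cols {
            if metricVersion >= 2 {
                col = strings.ToLower(col) // only v2 normalizes the case
            }
            fmt.Printf("metric_version=%d -> slave_%s\n", metricVersion, col)
        }
    }
}
```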

View File

@@ -7,6 +7,7 @@ import (
     "os"
     "runtime"
     "strings"
+    "time"
 
     "github.com/shirou/gopsutil/host"
     "github.com/shirou/gopsutil/load"
@@ -43,7 +44,8 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
         return err
     }
 
-    acc.AddGauge("system", fields, nil)
+    now := time.Now()
+    acc.AddGauge("system", fields, nil, now)
 
     hostinfo, err := host.Info()
     if err != nil {
@@ -52,10 +54,10 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
     acc.AddCounter("system", map[string]interface{}{
         "uptime": hostinfo.Uptime,
-    }, nil)
+    }, nil, now)
 
     acc.AddFields("system", map[string]interface{}{
         "uptime_format": format_uptime(hostinfo.Uptime),
-    }, nil)
+    }, nil, now)
 
     return nil
 }
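The fix captures `time.Now()` once and passes it to every accumulator call, so the gauge, counter, and uptime_format fields from one gather share a single timestamp. A minimal sketch of why that matters:

```go
package main

import (
    "fmt"
    "time"
)

func main() {
    // Two separate time.Now() calls differ by whatever work happens between
    // them, which previously let related system fields land on different
    // timestamps.
    t1 := time.Now()
    time.Sleep(2 * time.Millisecond) // stand-in for the work between Add* calls
    t2 := time.Now()
    fmt.Println("timestamp drift:", t2.Sub(t1))

    // The change takes the time once and hands the same value to AddGauge,
    // AddCounter, and AddFields.
    now := time.Now()
    for _, series := range []string{"gauge", "counter", "fields"} {
        fmt.Println(series, "at", now.UnixNano())
    }
}
```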

View File

@@ -7,6 +7,7 @@ import (
     "encoding/binary"
     "fmt"
     "sort"
+    "strconv"
     "strings"
     "time"
@@ -16,6 +17,8 @@ import (
     _ "github.com/jackc/pgx/stdlib"
 )
 
+const MaxInt64 = int64(^uint64(0) >> 1)
+
 type CrateDB struct {
     URL     string
     Timeout internal.Duration
@@ -115,11 +118,19 @@ func escapeValue(val interface{}) (string, error) {
     switch t := val.(type) {
     case string:
         return escapeString(t, `'`), nil
-    // We don't handle uint, uint32 and uint64 here because CrateDB doesn't
-    // seem to support unsigned types. But it seems like input plugins don't
-    // produce those types, so it's hopefully ok.
-    case int, int32, int64, float32, float64:
+    case int64, float64:
         return fmt.Sprint(t), nil
+    case uint64:
+        // The long type is the largest integer type in CrateDB and is the
+        // size of a signed int64. If our value is too large send the largest
+        // possible value.
+        if t <= uint64(MaxInt64) {
+            return strconv.FormatInt(int64(t), 10), nil
+        } else {
+            return strconv.FormatInt(MaxInt64, 10), nil
+        }
+    case bool:
+        return strconv.FormatBool(t), nil
     case time.Time:
         // see https://crate.io/docs/crate/reference/sql/data_types.html#timestamp
         return escapeValue(t.Format("2006-01-02T15:04:05.999-0700"))
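The new uint64 branch clamps values above the largest signed 64-bit integer because CrateDB's long type is signed. The same conversion as a standalone, runnable sketch:

```go
package main

import (
    "fmt"
    "strconv"
)

// Largest signed int64, computed the same way as MaxInt64 in the diff above.
const maxInt64 = int64(^uint64(0) >> 1) // 9223372036854775807

// uint64ToLong mirrors the uint64 case: values that fit in a signed int64
// pass through unchanged, anything larger is clamped to the maximum long.
func uint64ToLong(v uint64) string {
    if v <= uint64(maxInt64) {
        return strconv.FormatInt(int64(v), 10)
    }
    return strconv.FormatInt(maxInt64, 10)
}

func main() {
    fmt.Println(uint64ToLong(123))                  // 123
    fmt.Println(uint64ToLong(uint64(maxInt64) + 1)) // 9223372036854775807 (clamped)
}
```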

View File

@@ -111,12 +111,12 @@ func Test_escapeValue(t *testing.T) {
         {`foo`, `'foo'`},
         {`foo'bar 'yeah`, `'foo''bar ''yeah'`},
 
         // int types
-        {123, `123`}, // int
         {int64(123), `123`},
-        {int32(123), `123`},
+        {uint64(123), `123`},
+        {uint64(MaxInt64) + 1, `9223372036854775807`},
+        {true, `true`},
+        {false, `false`},
 
         // float types
         {123.456, `123.456`},
-        {float32(123.456), `123.456`}, // floating point SNAFU
         {float64(123.456), `123.456`},
 
         // time.Time
         {time.Date(2017, 8, 7, 16, 44, 52, 123*1000*1000, time.FixedZone("Dreamland", 5400)), `'2017-08-07T16:44:52.123+0130'`},

View File

@@ -179,13 +179,9 @@ func verifyValue(v interface{}) bool {
 func (p *Point) setValue(v interface{}) error {
     switch d := v.(type) {
-    case int:
-        p[1] = float64(int(d))
-    case int32:
-        p[1] = float64(int32(d))
     case int64:
-        p[1] = float64(int64(d))
-    case float32:
         p[1] = float64(d)
+    case uint64:
+        p[1] = float64(d)
     case float64:
         p[1] = float64(d)
@@ -195,7 +191,7 @@ func (p *Point) setValue(v interface{}) error {
             p[1] = float64(1)
         }
     default:
-        return fmt.Errorf("undeterminable type")
+        return fmt.Errorf("undeterminable field type: %T", v)
     }
     return nil
 }
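The narrowed switch matches the value types Telegraf fields actually carry (int64, uint64, float64, bool), widening each to the float64 a Datadog point requires, and the error now names the offending type. A standalone sketch of the same conversion, with a hypothetical helper name:

```go
package main

import "fmt"

// toDatadogValue mirrors the setValue switch above: points are float64,
// so each supported field type is widened, and bool maps to 1/0.
func toDatadogValue(v interface{}) (float64, error) {
    switch d := v.(type) {
    case int64:
        return float64(d), nil
    case uint64:
        return float64(d), nil // precision loss above 2^53, inherent to float64
    case float64:
        return d, nil
    case bool:
        if d {
            return 1, nil
        }
        return 0, nil
    default:
        return 0, fmt.Errorf("undeterminable field type: %T", v)
    }
}

func main() {
    for _, v := range []interface{}{int64(42), uint64(7), 3.14, true, "nope"} {
        f, err := toDatadogValue(v)
        fmt.Println(f, err)
    }
}
```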

View File

@@ -168,6 +168,30 @@ func TestBuildPoint(t *testing.T) {
             },
             nil,
         },
+        {
+            testutil.TestMetric(int64(0), "test int64"),
+            Point{
+                float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+                0.0,
+            },
+            nil,
+        },
+        {
+            testutil.TestMetric(uint64(0), "test uint64"),
+            Point{
+                float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+                0.0,
+            },
+            nil,
+        },
+        {
+            testutil.TestMetric(true, "test bool"),
+            Point{
+                float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+                1.0,
+            },
+            nil,
+        },
     }
     for _, tt := range tagtests {
         pt, err := buildMetrics(tt.ptIn)

View File

@@ -1,8 +1,9 @@
-# Kafka Producer Output Plugin
+# Kafka Output Plugin
 
 This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting a Kafka Producer.
 
-```
+### Configuration:
+
+```toml
 [[outputs.kafka]]
   ## URLs of kafka brokers
   brokers = ["localhost:9092"]
@@ -45,7 +46,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   ## 0 : No compression
   ## 1 : Gzip compression
   ## 2 : Snappy compression
-  compression_codec = 0
+  # compression_codec = 0
 
   ## RequiredAcks is used in Produce Requests to tell the broker how many
   ## replica acknowledgements it must see before responding
@@ -61,10 +62,11 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   ## received the data. This option provides the best durability, we
   ## guarantee that no messages will be lost as long as at least one in
   ## sync replica remains.
-  required_acks = -1
+  # required_acks = -1
 
-  ## The total number of times to retry sending a message
-  max_retry = 3
+  ## The maximum number of times to retry sending a metric before failing
+  ## until the next flush.
+  # max_retry = 3
 
   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -77,24 +79,21 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
   #   sasl_username = "kafka"
   #   sasl_password = "secret"
 
-  data_format = "influx"
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  # data_format = "influx"
 ```
 
-### Required parameters:
+#### `max_retry`
 
-* `brokers`: List of strings, this is for speaking to a cluster of `kafka` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]`
-* `topic`: The `kafka` topic to publish to.
+This option controls the number of retries before a failure notification is
+displayed for each message when no acknowledgement is received from the
+broker. When the setting is greater than `0`, message latency can be reduced,
+duplicate messages can occur in cases of transient errors, and broker loads
+can increase during downtime.
 
-### Optional parameters:
-
-* `routing_tag`: If this tag exists, its value will be used as the routing key
-* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression
-* `required_acks`: a setting for how may `acks` required from the `kafka` broker cluster.
-* `max_retry`: Max number of times to retry failed write
-* `ssl_ca`: SSL CA
-* `ssl_cert`: SSL CERT
-* `ssl_key`: SSL key
-* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
-* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
-* `topic_suffix`: Which, if any, method of calculating `kafka` topic suffix to use.
-  For examples, please refer to sample configuration.
+The option is similar to the
+[retries](https://kafka.apache.org/documentation/#producerconfigs) Producer
+option in the Java Kafka Producer.
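The kafka output is built on the sarama client pinned in Godeps above. As a rough sketch of how the commented defaults line up with sarama's producer config (this mapping is an assumption based on sarama's public API, not taken from this diff):

```go
package main

import "github.com/Shopify/sarama"

func main() {
    cfg := sarama.NewConfig()

    // compression_codec = 0 -> no compression (1: gzip, 2: snappy)
    cfg.Producer.Compression = sarama.CompressionNone

    // required_acks = -1 -> wait for all in-sync replicas
    cfg.Producer.RequiredAcks = sarama.WaitForAll

    // max_retry = 3 -> retries per message before reporting failure,
    // analogous to the Java producer's `retries` setting.
    cfg.Producer.Retry.Max = 3

    // sarama's sync producer requires success reporting to be enabled.
    cfg.Producer.Return.Successes = true

    // Needs a broker listening on localhost:9092 to actually connect.
    producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
    if err != nil {
        panic(err)
    }
    defer producer.Close()
}
```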

View File

@@ -113,7 +113,7 @@ var sampleConfig = `
   ## 0 : No compression
   ## 1 : Gzip compression
   ## 2 : Snappy compression
-  compression_codec = 0
+  # compression_codec = 0
 
   ## RequiredAcks is used in Produce Requests to tell the broker how many
   ## replica acknowledgements it must see before responding
@@ -129,10 +129,11 @@ var sampleConfig = `
   ## received the data. This option provides the best durability, we
   ## guarantee that no messages will be lost as long as at least one in
   ## sync replica remains.
-  required_acks = -1
+  # required_acks = -1
 
-  ## The total number of times to retry sending a message
-  max_retry = 3
+  ## The maximum number of times to retry sending a metric before failing
+  ## until the next flush.
+  # max_retry = 3
 
   ## Optional SSL Config
   # ssl_ca = "/etc/telegraf/ca.pem"
@@ -149,7 +150,7 @@ var sampleConfig = `
   ## Each data format has its own unique set of configuration options, read
   ## more about them here:
   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-  data_format = "influx"
+  # data_format = "influx"
 `
func ValidateTopicSuffixMethod(method string) error {

View File

@@ -12,6 +12,10 @@ import (
     "github.com/influxdata/telegraf/metric"
 )
 
+var (
+    utf8BOM = []byte("\xef\xbb\xbf")
+)
+
 type JSONParser struct {
     MetricName string
     TagKeys    []string
@@ -68,6 +72,7 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i
 func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
     buf = bytes.TrimSpace(buf)
+    buf = bytes.TrimPrefix(buf, utf8BOM)
     if len(buf) == 0 {
         return make([]telegraf.Metric, 0), nil
     }
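The parser now strips a UTF-8 byte-order mark before the empty-input check; encoding/json otherwise rejects the BOM as an invalid character. A self-contained sketch of the same guard:

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

var utf8BOM = []byte("\xef\xbb\xbf")

func main() {
    raw := []byte("\xef\xbb\xbf[{\"value\":17}]") // same payload as the test below

    // json.Unmarshal fails on a leading BOM ("invalid character 'ï'"),
    // so trim whitespace and the BOM first, as the parser now does.
    raw = bytes.TrimSpace(raw)
    raw = bytes.TrimPrefix(raw, utf8BOM)

    var out []map[string]interface{}
    if err := json.Unmarshal(raw, &out); err != nil {
        panic(err)
    }
    fmt.Println(out) // [map[value:17]]
}
```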

View File

@@ -428,3 +428,15 @@ func TestParseArrayWithTagKeys(t *testing.T) {
         "othertag": "baz",
     }, metrics[1].Tags())
 }
+
+var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")
+
+func TestHttpJsonBOM(t *testing.T) {
+    parser := JSONParser{
+        MetricName: "json_test",
+    }
+
+    // Most basic vanilla test
+    _, err := parser.Parse(jsonBOM)
+    assert.NoError(t, err)
+}