Compare commits

19 Commits

| SHA1 |
|---|
| 1fb4283f31 |
| 3187d58d92 |
| a8d33a26fb |
| de3b60c2b7 |
| 469ba00e7d |
| 7e25f98eb5 |
| f5e9ec8fc1 |
| 89d0d8455b |
| 81446d8cf3 |
| 7372e62083 |
| 6705b2bd94 |
| f21f31a08a |
| ae9e77dab8 |
| c7a6d8e9a2 |
| ad6f40c16c |
| b7e63ac6e8 |
| 91352a29bb |
| d45b64d7a0 |
| d496ff16bf |
CHANGELOG.md (20 changed lines)
@@ -1,18 +1,12 @@
## v1.6.1 [unreleased]
## v1.7 [unreleased]
## v1.6.2 [2018-05-08]

### New Inputs
### Bugfixes

- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin

### Features

- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non fixed length index suffixes.
- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
- [#4091](https://github.com/influxdata/telegraf/pull/4091): Fix handling of uint64 in datadog output.
- [#4099](https://github.com/influxdata/telegraf/pull/4099): Ignore UTF8 BOM in JSON parser.
- [#4104](https://github.com/influxdata/telegraf/issues/4104): Fix case for slave metrics in mysql input.
- [#4110](https://github.com/influxdata/telegraf/issues/4110): Fix uint support in cratedb output.

## v1.6.1 [2018-04-23]
Godeps (2 changed lines)
@@ -66,7 +66,7 @@ github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil a5c2888e464b14fa882c2a059e0f95716bd45cf1
github.com/shirou/gopsutil c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
@@ -79,15 +79,15 @@ services:
- "389:389"
- "636:636"
crate:
image: crate/crate
ports:
- "4200:4200"
- "4230:4230"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m
- JAVA_OPTS='-Xms256m -Xmx256m'
image: crate/crate
ports:
- "4200:4200"
- "4230:4230"
- "5432:5432"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m
@@ -3017,7 +3017,6 @@
# # watch_method = "inotify"
#
# ## Parse logstash-style "grok" patterns:
# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
# [inputs.logparser.grok]
# ## This is a list of patterns to check the given log file(s) for.
# ## Note that adding patterns here increases processing time. The most

@@ -70,7 +70,6 @@ const sampleConfig = `
# watch_method = "inotify"

## Parse logstash-style "grok" patterns:
## Telegraf built-in parsing patterns: https://goo.gl/dkay10
[inputs.logparser.grok]
## This is a list of patterns to check the given log file(s) for.
## Note that adding patterns here increases processing time. The most
@@ -114,7 +114,7 @@ style concurrently:
servers = ["tcp(127.0.0.1:3306)/"]

[[inputs.mysql]]
name_override = "_2"
name_suffix = "_v2"
metric_version = 2

servers = ["tcp(127.0.0.1:3306)/"]
@@ -141,7 +141,7 @@ measurement name.
metric_version = 2

[[inputs.mysql]]
name_override = "_2"
name_suffix = "_v2"
metric_version = 2

servers = ["tcp(127.0.0.1:3306)/"]
@@ -608,7 +608,9 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
}
// range over columns, and try to parse values
for i, col := range cols {
col = strings.ToLower(col)
if m.MetricVersion >= 2 {
col = strings.ToLower(col)
}
if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok {
fields["slave_"+col] = value
}
@@ -7,6 +7,7 @@ import (
"os"
"runtime"
"strings"
"time"

"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/load"
@@ -43,7 +44,8 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
return err
}

acc.AddGauge("system", fields, nil)
now := time.Now()
acc.AddGauge("system", fields, nil, now)

hostinfo, err := host.Info()
if err != nil {
@@ -52,10 +54,10 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {

acc.AddCounter("system", map[string]interface{}{
"uptime": hostinfo.Uptime,
}, nil)
}, nil, now)
acc.AddFields("system", map[string]interface{}{
"uptime_format": format_uptime(hostinfo.Uptime),
}, nil)
}, nil, now)

return nil
}
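The system input change above ([#4078](https://github.com/influxdata/telegraf/pull/4078)) captures one timestamp and reuses it for every accumulator call, so all system fields land on the same point in time. A minimal self-contained sketch of that pattern, using a toy `printAcc` type defined here purely for illustration (not Telegraf's actual `Accumulator` interface):

```go
package main

import (
	"fmt"
	"time"
)

// printAcc is a stand-in accumulator for this sketch; it prints each metric
// together with the timestamp it was handed.
type printAcc struct{}

func (printAcc) AddGauge(name string, fields map[string]interface{}, tags map[string]string, t time.Time) {
	fmt.Println("gauge  ", name, fields, t.UnixNano())
}

func (printAcc) AddCounter(name string, fields map[string]interface{}, tags map[string]string, t time.Time) {
	fmt.Println("counter", name, fields, t.UnixNano())
}

func main() {
	acc := printAcc{}

	// Capture the time once and pass it to every Add call, so the load and
	// uptime fields share a single timestamp instead of drifting apart.
	now := time.Now()
	acc.AddGauge("system", map[string]interface{}{"load1": 0.42}, nil, now)
	acc.AddCounter("system", map[string]interface{}{"uptime": uint64(12345)}, nil, now)
}
```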
@@ -7,6 +7,7 @@ import (
"encoding/binary"
"fmt"
"sort"
"strconv"
"strings"
"time"

@@ -16,6 +17,8 @@ import (
_ "github.com/jackc/pgx/stdlib"
)

const MaxInt64 = int64(^uint64(0) >> 1)

type CrateDB struct {
URL string
Timeout internal.Duration
@@ -115,11 +118,19 @@ func escapeValue(val interface{}) (string, error) {
switch t := val.(type) {
case string:
return escapeString(t, `'`), nil
// We don't handle uint, uint32 and uint64 here because CrateDB doesn't
// seem to support unsigned types. But it seems like input plugins don't
// produce those types, so it's hopefully ok.
case int, int32, int64, float32, float64:
case int64, float64:
return fmt.Sprint(t), nil
case uint64:
// The long type is the largest integer type in CrateDB and is the
// size of a signed int64. If our value is too large send the largest
// possible value.
if t <= uint64(MaxInt64) {
return strconv.FormatInt(int64(t), 10), nil
} else {
return strconv.FormatInt(MaxInt64, 10), nil
}
case bool:
return strconv.FormatBool(t), nil
case time.Time:
// see https://crate.io/docs/crate/reference/sql/data_types.html#timestamp
return escapeValue(t.Format("2006-01-02T15:04:05.999-0700"))
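For [#4110](https://github.com/influxdata/telegraf/issues/4110), the new `case uint64` branch above clamps values that do not fit in a signed int64 to the largest representable long. A standalone sketch of just that clamping behavior, with a hypothetical helper name (`clampUint64`) rather than the plugin's `escapeValue`:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// clampUint64 renders a uint64 as a decimal string, capping any value that
// does not fit in a signed int64 at math.MaxInt64, mirroring the change above.
func clampUint64(v uint64) string {
	if v <= uint64(math.MaxInt64) {
		return strconv.FormatInt(int64(v), 10)
	}
	return strconv.FormatInt(math.MaxInt64, 10)
}

func main() {
	fmt.Println(clampUint64(123))            // "123"
	fmt.Println(clampUint64(math.MaxUint64)) // "9223372036854775807"
}
```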
@@ -111,12 +111,12 @@ func Test_escapeValue(t *testing.T) {
{`foo`, `'foo'`},
{`foo'bar 'yeah`, `'foo''bar ''yeah'`},
// int types
{123, `123`}, // int
{int64(123), `123`},
{int32(123), `123`},
{uint64(123), `123`},
{uint64(MaxInt64) + 1, `9223372036854775807`},
{true, `true`},
{false, `false`},
// float types
{123.456, `123.456`},
{float32(123.456), `123.456`}, // floating point SNAFU
{float64(123.456), `123.456`},
// time.Time
{time.Date(2017, 8, 7, 16, 44, 52, 123*1000*1000, time.FixedZone("Dreamland", 5400)), `'2017-08-07T16:44:52.123+0130'`},
@@ -179,13 +179,9 @@ func verifyValue(v interface{}) bool {

func (p *Point) setValue(v interface{}) error {
switch d := v.(type) {
case int:
p[1] = float64(int(d))
case int32:
p[1] = float64(int32(d))
case int64:
p[1] = float64(int64(d))
case float32:
p[1] = float64(d)
case uint64:
p[1] = float64(d)
case float64:
p[1] = float64(d)
@@ -195,7 +191,7 @@ func (p *Point) setValue(v interface{}) error {
p[1] = float64(1)
}
default:
return fmt.Errorf("undeterminable type")
return fmt.Errorf("undeterminable field type: %T", v)
}
return nil
}
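The [#4091](https://github.com/influxdata/telegraf/pull/4091) change above adds `uint64` to the float conversion switch and makes the fallback error report the offending type. A self-contained sketch of the same idea, using an illustrative `toFloat` helper rather than the plugin's `Point.setValue`:

```go
package main

import "fmt"

// toFloat converts supported metric value types to float64, reporting the
// concrete Go type (%T) when a value cannot be converted.
func toFloat(v interface{}) (float64, error) {
	switch d := v.(type) {
	case int64:
		return float64(d), nil
	case uint64:
		return float64(d), nil // may lose precision above 2^53
	case float64:
		return d, nil
	case bool:
		if d {
			return 1, nil
		}
		return 0, nil
	default:
		return 0, fmt.Errorf("undeterminable field type: %T", v)
	}
}

func main() {
	for _, v := range []interface{}{int64(42), uint64(7), 3.14, true, "oops"} {
		f, err := toFloat(v)
		fmt.Println(f, err)
	}
}
```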
@@ -168,6 +168,30 @@ func TestBuildPoint(t *testing.T) {
},
nil,
},
{
testutil.TestMetric(int64(0), "test int64"),
Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
0.0,
},
nil,
},
{
testutil.TestMetric(uint64(0), "test uint64"),
Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
0.0,
},
nil,
},
{
testutil.TestMetric(true, "test bool"),
Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
1.0,
},
nil,
},
}
for _, tt := range tagtests {
pt, err := buildMetrics(tt.ptIn)
@@ -1,8 +1,9 @@
# Kafka Producer Output Plugin
# Kafka Output Plugin

This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting a Kafka Producer.

```
### Configuration:
```toml
[[outputs.kafka]]
## URLs of kafka brokers
brokers = ["localhost:9092"]
@@ -45,7 +46,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
## 0 : No compression
## 1 : Gzip compression
## 2 : Snappy compression
compression_codec = 0
# compression_codec = 0

## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
@@ -61,10 +62,11 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
## received the data. This option provides the best durability, we
## guarantee that no messages will be lost as long as at least one in
## sync replica remains.
required_acks = -1
# required_acks = -1

## The total number of times to retry sending a message
max_retry = 3
## The maximum number of times to retry sending a metric before failing
## until the next flush.
# max_retry = 3

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -77,24 +79,21 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm
# sasl_username = "kafka"
# sasl_password = "secret"

data_format = "influx"
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
```

### Required parameters:
#### `max_retry`

* `brokers`: List of strings, this is for speaking to a cluster of `kafka` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]`
* `topic`: The `kafka` topic to publish to.
This option controls the number of retries before a failure notification is
displayed for each message when no acknowledgement is received from the
broker. When the setting is greater than `0`, message latency can be reduced,
duplicate messages can occur in cases of transient errors, and broker loads
can increase during downtime.

### Optional parameters:

* `routing_tag`: If this tag exists, its value will be used as the routing key
* `compression_codec`: What level of compression to use: `0` -> no compression, `1` -> gzip compression, `2` -> snappy compression
* `required_acks`: a setting for how may `acks` required from the `kafka` broker cluster.
* `max_retry`: Max number of times to retry failed write
* `ssl_ca`: SSL CA
* `ssl_cert`: SSL CERT
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
* `topic_suffix`: Which, if any, method of calculating `kafka` topic suffix to use.
For examples, please refer to sample configuration.
The option is similar to the
[retries](https://kafka.apache.org/documentation/#producerconfigs) Producer
option in the Java Kafka Producer.
@@ -113,7 +113,7 @@ var sampleConfig = `
## 0 : No compression
## 1 : Gzip compression
## 2 : Snappy compression
compression_codec = 0
# compression_codec = 0

## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
@@ -129,10 +129,11 @@ var sampleConfig = `
## received the data. This option provides the best durability, we
## guarantee that no messages will be lost as long as at least one in
## sync replica remains.
required_acks = -1
# required_acks = -1

## The total number of times to retry sending a message
max_retry = 3
## The maximum number of times to retry sending a metric before failing
## until the next flush.
# max_retry = 3

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -149,7 +150,7 @@ var sampleConfig = `
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
# data_format = "influx"
`

func ValidateTopicSuffixMethod(method string) error {
@@ -12,6 +12,10 @@ import (
"github.com/influxdata/telegraf/metric"
)

var (
utf8BOM = []byte("\xef\xbb\xbf")
)

type JSONParser struct {
MetricName string
TagKeys []string
@@ -68,6 +72,7 @@ func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]i

func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) {
buf = bytes.TrimSpace(buf)
buf = bytes.TrimPrefix(buf, utf8BOM)
if len(buf) == 0 {
return make([]telegraf.Metric, 0), nil
}
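The [#4099](https://github.com/influxdata/telegraf/pull/4099) change above strips a UTF-8 byte order mark before decoding. A tiny standalone sketch of the same trimming step (the `utf8BOM` value matches the one added above; the surrounding program is illustrative):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

var utf8BOM = []byte("\xef\xbb\xbf")

func main() {
	buf := []byte("\xef\xbb\xbf[{\"value\":17}]")

	// Without trimming, the leading BOM makes json.Unmarshal fail with an
	// "invalid character" error; TrimSpace alone does not remove it.
	buf = bytes.TrimSpace(buf)
	buf = bytes.TrimPrefix(buf, utf8BOM)

	var out []map[string]interface{}
	if err := json.Unmarshal(buf, &out); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(out) // [map[value:17]]
}
```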
@@ -428,3 +428,15 @@ func TestParseArrayWithTagKeys(t *testing.T) {
"othertag": "baz",
}, metrics[1].Tags())
}

var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]")

func TestHttpJsonBOM(t *testing.T) {
parser := JSONParser{
MetricName: "json_test",
}

// Most basic vanilla test
_, err := parser.Parse(jsonBOM)
assert.NoError(t, err)
}