Compare commits

..

37 Commits

Author SHA1 Message Date
Daniel Nelson
2de7aa23d7 Set 1.4.1 release date in changelog
(cherry picked from commit fd702e6bb8)
2017-09-26 14:19:51 -07:00
Daniel Nelson
52cd38150c Update changelog
(cherry picked from commit 0048bf2120)
2017-09-18 14:25:57 -07:00
Daniel Nelson
c08f492f78 Fix arm64 packages contain 32-bit executable (#3246)
(cherry picked from commit b8e134cd37)
2017-09-18 14:25:57 -07:00
Daniel Nelson
66cfe80e37 Update changelog
(cherry picked from commit b94cda6b46)
2017-09-14 15:30:51 -07:00
Trevor Pounds
ba5e5ec283 Fix panic in statsd p100 calculation (#3230)
(cherry picked from commit 73372872c2)
2017-09-14 15:30:51 -07:00
Daniel Nelson
259f8e4002 Update changelog
(cherry picked from commit 875ab3c4b7)
2017-09-14 15:05:38 -07:00
Mark Wilkinson - m82labs
558ab0c730 Fix duplicate keys in perf counters sqlserver query (#3175)
(cherry picked from commit 1c5ebd4be3)
2017-09-14 15:05:38 -07:00
Daniel Nelson
8d4fbe29e7 Update changelog
(cherry picked from commit 103d24bfba)
2017-09-14 15:01:28 -07:00
Daniel Nelson
72337a1c97 Fix skipped line with empty target in iptables (#3235)
(cherry picked from commit d5f48e3e96)
2017-09-14 15:01:21 -07:00
Daniel Nelson
86537899b2 Update changelog
(cherry picked from commit 7a41d2c586)
2017-09-14 13:07:30 -07:00
Trevor Pounds
a727d5d1f0 Fix counter and gauge metric types. (#3232)
(cherry picked from commit fa1982323a)
2017-09-14 13:07:30 -07:00
Daniel Nelson
7ec194a482 Update changelog
(cherry picked from commit cdf63c5776)
2017-09-13 17:32:03 -07:00
Daniel Nelson
5a77d28837 Whitelist allowed char classes for opentsdb output. (#3227)
(cherry picked from commit 0a8c2e0b3b)
2017-09-13 17:32:03 -07:00
Daniel Nelson
47927c353d Fix fluentd test
(cherry picked from commit eebee9759f)
2017-09-12 17:58:29 -07:00
Daniel Nelson
b9e7fa27aa Update changelog
(cherry picked from commit c5cfde667a)
2017-09-12 17:18:29 -07:00
Daniel Nelson
0d437140bd Fix optional field types in fluentd input
(cherry picked from commit 8a68e7424c)
2017-09-12 17:18:29 -07:00
Daniel Nelson
36969a63c2 Update changelog
(cherry picked from commit cc63b3b667)
2017-09-11 12:28:37 -07:00
DanKans
e9a12bb694 Fix MQTT input exits if Broker is not available on startup (#3202)
(cherry picked from commit 5488f4b3ac)
2017-09-11 12:28:12 -07:00
Daniel Nelson
34b7a4c361 Add 1.4.0 release date
(cherry picked from commit ab1c11b06d)
2017-09-05 17:15:06 -07:00
Daniel Nelson
f46370d982 Sort metrics before comparing in graphite test
(cherry picked from commit 98e784faf3)
2017-09-05 12:50:55 -07:00
Daniel Nelson
07b7e09749 Update changelog
(cherry picked from commit f43af72785)
2017-08-31 13:44:05 -07:00
Daniel Nelson
e54795795d Fix panic when handling string fields with escapes (#3188)
(cherry picked from commit 28d16188b3)
2017-08-30 21:17:10 -07:00
Daniel Nelson
b2b2bd8a27 Update changelog 2017-08-29 16:30:25 -07:00
Daniel Nelson
f96cbb48c7 Convert bool fields to int in graphite serializer 2017-08-29 16:30:25 -07:00
Seua Polyakov
9077cb83bc Skip non-numerical values in graphite format (#3179) 2017-08-29 16:30:25 -07:00
Daniel Nelson
0f188f280f Update changelog 2017-08-28 17:18:00 -07:00
Dylan Meissner
b9420e73bd HTTP headers can be added to InfluxDB output (#3182)
(cherry picked from commit a9a40cbf87)
2017-08-28 17:15:43 -07:00
Daniel Nelson
1e43e5e7ae Update changelog
(cherry picked from commit 5fd8ab36d3)
2017-08-28 17:09:08 -07:00
Jeff Nickoloff
5e104ad974 Added CloudWatch metric constraint validation (#3183)
(cherry picked from commit ac1fa05672)
2017-08-28 17:09:08 -07:00
Daniel Nelson
cc9d8c700c Update changelog
(cherry picked from commit a98496591a)
2017-08-25 18:08:55 -07:00
Ashton Kinslow
b15ec21ba7 Fix NSQ input plugin when used with version 1.0.0-compat
(cherry picked from commit 0a6541dfa8)
2017-08-25 18:08:55 -07:00
Daniel Nelson
a9abfe8f08 Update changelog
(cherry picked from commit 6abecd0ac7)
2017-08-25 12:59:51 -07:00
Rickard von Essen
307210242c Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149)
(cherry picked from commit 0502b65316)
2017-08-25 12:59:38 -07:00
Daniel Nelson
0a41db16f1 Update changelog
(cherry picked from commit e400fcf5da)
2017-08-25 11:56:30 -07:00
Jan Willem Janssen
7480267fd2 Fix parsing of SHM remotes in ntpq input (#3163)
(cherry picked from commit d449833de9)
2017-08-25 11:56:27 -07:00
Daniel Nelson
30949c4596 Update fail2ban documentation
(cherry picked from commit 58751fa4df)
2017-08-25 11:43:49 -07:00
Daniel Nelson
47264bc860 Fix amqp_consumer data_format documentation
closes #3164

(cherry picked from commit 656ce31d98)
2017-08-24 13:18:23 -07:00
38 changed files with 910 additions and 216 deletions

View File

@@ -1,4 +1,17 @@
## v1.4 [unreleased]
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
@@ -62,6 +75,7 @@
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
@@ -97,6 +111,12 @@
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3053](https://github.com/influxdata/telegraf/issues/3053): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input.
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]

View File

@@ -96,6 +96,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
### Graphite Configuration:
```toml

View File

@@ -150,12 +150,6 @@ func makemetric(
continue
}
case string:
if strings.HasSuffix(val, `\`) {
log.Printf("D! Measurement [%s] field [%s] has a value "+
"ending with a backslash, skipping", measurement, k)
delete(fields, k)
continue
}
fields[k] = v
default:
fields[k] = v

View File

@@ -370,16 +370,17 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
expectedTags: map[string]string{},
},
{
name: "Field value with trailing slash dropped",
name: "Field value with trailing slash okay",
measurement: `cpu`,
fields: map[string]interface{}{
"value": int64(42),
"bad": `xyzzy\`,
"ok": `xyzzy\`,
},
tags: map[string]string{},
expectedMeasurement: `cpu`,
expectedFields: map[string]interface{}{
"value": int64(42),
"ok": `xyzzy\`,
},
expectedTags: map[string]string{},
},
@@ -387,7 +388,7 @@ func TestMakeMetric_TrailingSlash(t *testing.T) {
name: "Must have one field after dropped",
measurement: `cpu`,
fields: map[string]interface{}{
"bad": `xyzzy\`,
"bad": math.NaN(),
},
tags: map[string]string{},
expectedNil: true,

View File

@@ -21,14 +21,14 @@ func New(
t time.Time,
mType ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
return nil, fmt.Errorf("missing measurement name")
}
if len(fields) == 0 {
return nil, fmt.Errorf("%s: must have one or more fields", name)
}
if strings.HasSuffix(name, `\`) {
return nil, fmt.Errorf("Metric cannot have measurement name ending with a backslash")
return nil, fmt.Errorf("%s: measurement name cannot end with a backslash", name)
}
var thisType telegraf.ValueType
@@ -49,10 +49,10 @@ func New(
taglen := 0
for k, v := range tags {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have tag key ending with a backslash")
return nil, fmt.Errorf("%s: tag key cannot end with a backslash: %s", name, k)
}
if strings.HasSuffix(v, `\`) {
return nil, fmt.Errorf("Metric cannot have tag value ending with a backslash")
return nil, fmt.Errorf("%s: tag value cannot end with a backslash: %s", name, v)
}
if len(k) == 0 || len(v) == 0 {
@@ -79,7 +79,7 @@ func New(
fieldlen := 0
for k, _ := range fields {
if strings.HasSuffix(k, `\`) {
return nil, fmt.Errorf("Metric cannot have field key ending with a backslash")
return nil, fmt.Errorf("%s: field key cannot end with a backslash: %s", name, k)
}
// 10 bytes is completely arbitrary, but will at least prevent some
@@ -102,7 +102,8 @@ func New(
}
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
// is not escaped. Does not allow the escape char to be escaped. Returns -1 if
// not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
@@ -122,6 +123,46 @@ func indexUnescapedByte(buf []byte, b byte) int {
return keyi
}
// indexUnescapedByteBackslashEscaping finds the index of the first byte equal
// to b in buf that is not escaped. Allows for the escape char `\` to be
// escaped. Returns -1 if not found.
func indexUnescapedByteBackslashEscaping(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
type metric struct {
name []byte
tags []byte
@@ -283,7 +324,7 @@ func (m *metric) Fields() map[string]interface{} {
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
i3 = indexUnescapedByteBackslashEscaping(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
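The parity rule above is the crux of the fix: a quote only terminates a field value when it is preceded by an even number of backslashes. A minimal standalone sketch of that check (reusing the countBackslashes logic for illustration, not the telegraf source itself):

```go
package main

import (
	"bytes"
	"fmt"
)

// countBackslashes counts the run of consecutive backslashes that
// ends at index, mirroring the helper introduced above.
func countBackslashes(buf []byte, index int) int {
	count := 0
	for index >= 0 && buf[index] == '\\' {
		count++
		index--
	}
	return count
}

func main() {
	// In `x\"y` the quote is preceded by one backslash (odd count),
	// so it is escaped; in `x\\"y` the backslash itself is escaped
	// (even count), so the quote terminates the field value.
	for _, s := range []string{`x\"y`, `x\\"y`} {
		buf := []byte(s)
		q := bytes.IndexByte(buf, '"')
		escaped := countBackslashes(buf, q-1)%2 == 1
		fmt.Printf("%-6s quote escaped: %v\n", s, escaped)
	}
}
```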

View File

@@ -258,6 +258,7 @@ func TestNewMetric_Fields(t *testing.T) {
"quote_string": `x"y`,
"backslash_quote_string": `x\"y`,
"backslash": `x\y`,
"ends_with_backslash": `x\`,
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)

View File

@@ -39,9 +39,9 @@ The following defaults are known to work with RabbitMQ:
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@@ -85,10 +85,10 @@ func (a *AMQPConsumer) SampleConfig() string {
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to output.
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}

View File

@@ -1,19 +1,19 @@
# Fail2ban Plugin
# Fail2ban Input Plugin
The fail2ban plugin gathers counts of failed and banned ip addresses from fail2ban.
The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org).
This plugin run fail2ban-client command, and fail2ban-client require root access.
You have to grant telegraf to run fail2ban-client:
This plugin runs the `fail2ban-client` command which generally requires root access.
Acquiring the required permissions can be done using several methods:
- Run telegraf as root. (deprecate)
- Configure sudo to grant telegraf to fail2ban-client.
- Use sudo to run fail2ban-client.
- Run telegraf as root. (not recommended)
### Using sudo
You may edit your sudo configuration with the following:
``` sudo
telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *
```
### Configuration:
@@ -21,10 +21,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
``` toml
# Read metrics from fail2ban.
[[inputs.fail2ban]]
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
```
@@ -38,7 +35,7 @@ telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status *
- All measurements have the following tags:
- jail
### Example Output:
```
@@ -55,6 +52,5 @@ Status for the jail: sshd
```
```
$ ./telegraf --config telegraf.conf --input-filter fail2ban --test
fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000
```

View File

@@ -6,9 +6,10 @@ import (
"os/exec"
"strings"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"strconv"
)
var (
@@ -21,10 +22,7 @@ type Fail2ban struct {
}
var sampleConfig = `
## fail2ban-client require root access.
## Setting 'use_sudo' to true will make use of sudo to run fail2ban-client.
## Users must configure sudo to allow telegraf user to run fail2ban-client with no password.
## This plugin run only "fail2ban-client status".
## Use sudo to run fail2ban-client
use_sudo = false
`

View File

@@ -148,15 +148,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error {
}
if p.BufferQueueLength != nil {
tmpFields["buffer_queue_length"] = p.BufferQueueLength
tmpFields["buffer_queue_length"] = *p.BufferQueueLength
}
if p.RetryCount != nil {
tmpFields["retry_count"] = p.RetryCount
tmpFields["retry_count"] = *p.RetryCount
}
if p.BufferTotalQueuedSize != nil {
tmpFields["buffer_total_queued_size"] = p.BufferTotalQueuedSize
tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize
}
if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) {
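The fix above is a pointer-dereference issue: the optional stats are decoded into pointer fields so that absent values can be detected, but storing the pointer itself as a metric field leaves downstream type switches with an unusable pointer type. A small sketch of the difference (field names are illustrative, not the plugin's):

```go
package main

import "fmt"

func main() {
	v := 42.0
	p := &v // optional value decoded into a pointer, as in the plugin

	fields := map[string]interface{}{
		"as_pointer": p,  // before the fix: serializers see a *float64
		"as_value":   *p, // after the fix: a plain float64
	}
	for name, value := range fields {
		switch value.(type) {
		case float64:
			fmt.Printf("%s: usable numeric field\n", name)
		default:
			fmt.Printf("%s: unsupported type %T\n", name, value)
		}
	}
}
```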

View File

@@ -122,12 +122,6 @@ func Test_parse(t *testing.T) {
}
func Test_Gather(t *testing.T) {
if testing.Short() {
t.Skip("Skipping Gather function test")
}
t.Log("Testing Gather function")
t.Logf("Start HTTP mock (%s) with sampleJSON", fluentdTest.Endpoint)
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -157,13 +151,13 @@ func Test_Gather(t *testing.T) {
assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"])
assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"])
assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"])
assert.Equal(t, expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"])
assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"])
assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"])
assert.Equal(t, expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"])
assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"])
assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"])
}

View File

@@ -95,7 +95,7 @@ const measurement = "iptables"
var errParse = errors.New("Cannot parse iptables list information")
var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`)
var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`)
var commentRe = regexp.MustCompile(`\s*/\*\s*(.+?)\s*\*/\s*`)
var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)
func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error {
lines := strings.Split(data, "\n")
@@ -110,21 +110,14 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error
return errParse
}
for _, line := range lines[2:] {
tokens := strings.Fields(line)
if len(tokens) < 10 {
matches := valuesRe.FindStringSubmatch(line)
if len(matches) != 4 {
continue
}
pkts := tokens[0]
bytes := tokens[1]
end := strings.Join(tokens[9:], " ")
matches := commentRe.FindStringSubmatch(end)
if matches == nil {
continue
}
comment := matches[1]
pkts := matches[1]
bytes := matches[2]
comment := matches[3]
tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment}
fields := make(map[string]interface{})
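Replacing the token-count heuristic with a single anchored regex is what fixes the empty-target case (#3235): a rule line with a blank column no longer shifts the field positions, because only the two leading counters and the trailing comment are matched. A quick demonstration against a line adapted from the test data:

```go
package main

import (
	"fmt"
	"regexp"
)

var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`)

func main() {
	line := `  123   456 all  --  eth0 *  0.0.0.0/0  0.0.0.0/0  /* all_recv */`
	if m := valuesRe.FindStringSubmatch(line); len(m) == 4 {
		fmt.Printf("pkts=%s bytes=%s ruleid=%q\n", m[1], m[2], m[3])
	}
}
```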

View File

@@ -154,68 +154,85 @@ func TestIptables_Gather(t *testing.T) {
tags: []map[string]string{},
fields: [][]map[string]interface{}{},
},
{ // 11 - all target and ports
table: "all_recv",
chains: []string{"accountfwd"},
values: []string{
`Chain accountfwd (1 references)
pkts bytes target prot opt in out source destination
123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
`},
tags: []map[string]string{
map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
},
fields: [][]map[string]interface{}{
{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
},
},
}
for i, tt := range tests {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
t.Run(tt.table, func(t *testing.T) {
i++
ipt := &Iptables{
Table: tt.table,
Chains: tt.chains,
lister: func(table, chain string) (string, error) {
if len(tt.values) > 0 {
v := tt.values[0]
tt.values = tt.values[1:]
return v, nil
}
return "", nil
},
}
continue
}
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
acc := new(testutil.Accumulator)
err := acc.GatherError(ipt.Gather)
if !reflect.DeepEqual(tt.err, err) {
t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err)
}
continue
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
if tt.table == "" {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty table got %d", i, n)
}
return
}
continue
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
if len(tt.chains) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 fields if empty chains got %d", i, n)
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
return
}
}
if len(tt.tags) == 0 {
n := acc.NFields()
if n != 0 {
t.Errorf("%d: expected 0 values got %d", i, n)
}
return
}
n := 0
for j, tags := range tt.tags {
for k, fields := range tt.fields[j] {
if len(acc.Metrics) < n+1 {
t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics))
break
}
m := acc.Metrics[n]
if !reflect.DeepEqual(m.Measurement, measurement) {
t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement)
}
if !reflect.DeepEqual(m.Tags, tags) {
t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags)
}
if !reflect.DeepEqual(m.Fields, fields) {
t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields)
}
n++
}
}
})
}
}

View File

@@ -13,6 +13,8 @@ The plugin expects messages in the
servers = ["localhost:1883"]
## MQTT QoS, must be 0, 1, or 2
qos = 0
## Connection timeout for initial connection in seconds
connection_timeout = 30
## Topics to subscribe to
topics = [

View File

@@ -16,11 +16,12 @@ import (
)
type MQTTConsumer struct {
Servers []string
Topics []string
Username string
Password string
QoS int `toml:"qos"`
Servers []string
Topics []string
Username string
Password string
QoS int `toml:"qos"`
ConnectionTimeout internal.Duration `toml:"connection_timeout"`
parser parsers.Parser
@@ -48,13 +49,15 @@ type MQTTConsumer struct {
// keep the accumulator internally:
acc telegraf.Accumulator
started bool
connected bool
}
var sampleConfig = `
servers = ["localhost:1883"]
## MQTT QoS, must be 0, 1, or 2
qos = 0
## Connection timeout for initial connection in seconds
connection_timeout = 30
## Topics to subscribe to
topics = [
@@ -103,7 +106,7 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) {
func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
m.Lock()
defer m.Unlock()
m.started = false
m.connected = false
if m.PersistentSession && m.ClientID == "" {
return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" +
@@ -115,26 +118,40 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS)
}
if int(m.ConnectionTimeout.Duration) <= 0 {
return fmt.Errorf("MQTT Consumer, invalid connection_timeout value: %d", m.ConnectionTimeout)
}
opts, err := m.createOpts()
if err != nil {
return err
}
m.client = mqtt.NewClient(opts)
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
m.in = make(chan mqtt.Message, 1000)
m.done = make(chan struct{})
m.connect()
return nil
}
func (m *MQTTConsumer) connect() error {
if token := m.client.Connect(); token.Wait() && token.Error() != nil {
err := token.Error()
log.Printf("D! MQTT Consumer, connection error - %v", err)
return err
}
go m.receiver()
return nil
}
func (m *MQTTConsumer) onConnect(c mqtt.Client) {
log.Printf("I! MQTT Client Connected")
if !m.PersistentSession || !m.started {
if !m.PersistentSession || !m.connected {
topics := make(map[string]byte)
for _, topic := range m.Topics {
topics[topic] = byte(m.QoS)
@@ -145,7 +162,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
strings.Join(m.Topics[:], ","), subscribeToken.Error()))
}
m.started = true
m.connected = true
}
return
}
@@ -186,18 +203,27 @@ func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {
func (m *MQTTConsumer) Stop() {
m.Lock()
defer m.Unlock()
close(m.done)
m.client.Disconnect(200)
m.started = false
if m.connected {
close(m.done)
m.client.Disconnect(200)
m.connected = false
}
}
func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error {
if !m.connected {
m.connect()
}
return nil
}
func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
opts := mqtt.NewClientOptions()
opts.ConnectTimeout = m.ConnectionTimeout.Duration
if m.ClientID == "" {
opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5))
} else {
@@ -238,6 +264,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
opts.SetCleanSession(!m.PersistentSession)
opts.SetOnConnectHandler(m.onConnect)
opts.SetConnectionLostHandler(m.onConnectionLost)
return opts, nil
}
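The shape of the fix: Start no longer returns the broker's connection error, and each Gather tick retries until the client is connected. A minimal sketch of that lazy-reconnect pattern (the consumer type and failure counter are invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

type consumer struct {
	connected bool
	attempts  int
}

func (c *consumer) connect() error {
	c.attempts++
	if c.attempts < 3 { // pretend the broker is down for two ticks
		return errors.New("broker unavailable")
	}
	c.connected = true
	return nil
}

// Gather retries the connection but never fails the collection cycle.
func (c *consumer) Gather() error {
	if !c.connected {
		if err := c.connect(); err != nil {
			fmt.Println("retrying:", err)
		}
	}
	return nil
}

func main() {
	c := &consumer{}
	for i := 0; i < 4; i++ {
		c.Gather()
	}
	fmt.Println("connected:", c.connected)
}
```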

View File

@@ -22,11 +22,13 @@ const (
func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) {
in := make(chan mqtt.Message, 100)
n := &MQTTConsumer{
Topics: []string{"telegraf"},
Servers: []string{"localhost:1883"},
in: in,
done: make(chan struct{}),
Topics: []string{"telegraf"},
Servers: []string{"localhost:1883"},
in: in,
done: make(chan struct{}),
connected: true,
}
return n, in
}
@@ -131,6 +133,7 @@ func TestRunParserAndGather(t *testing.T) {
n, in := newTestMQTTConsumer()
acc := testutil.Accumulator{}
n.acc = &acc
defer close(n.done)
n.parser, _ = parsers.NewInfluxParser()

View File

@@ -25,6 +25,7 @@ package nsq
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -101,28 +102,42 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error {
return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status)
}
s := &NSQStats{}
err = json.NewDecoder(r.Body).Decode(s)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return fmt.Errorf(`Error reading body: %s`, err)
}
data := &NSQStatsData{}
err = json.Unmarshal(body, data)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
// Data was not parsed correctly; attempt to use the old format.
if len(data.Version) < 1 {
wrapper := &NSQStats{}
err = json.Unmarshal(body, wrapper)
if err != nil {
return fmt.Errorf(`Error parsing response: %s`, err)
}
data = &wrapper.Data
}
tags := map[string]string{
`server_host`: u.Host,
`server_version`: s.Data.Version,
`server_version`: data.Version,
}
fields := make(map[string]interface{})
if s.Data.Health == `OK` {
if data.Health == `OK` {
fields["server_count"] = int64(1)
} else {
fields["server_count"] = int64(0)
}
fields["topic_count"] = int64(len(s.Data.Topics))
fields["topic_count"] = int64(len(data.Topics))
acc.AddFields("nsq_server", fields, tags)
for _, t := range s.Data.Topics {
topicStats(t, acc, u.Host, s.Data.Version)
for _, t := range data.Topics {
topicStats(t, acc, u.Host, data.Version)
}
return nil
@@ -189,7 +204,6 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
"server_version": version,
"topic": topic,
"channel": channel,
"client_name": c.Name,
"client_id": c.ID,
"client_hostname": c.Hostname,
"client_version": c.Version,
@@ -199,6 +213,9 @@ func clientStats(c ClientStats, acc telegraf.Accumulator, host, version, topic,
"client_snappy": strconv.FormatBool(c.Snappy),
"client_deflate": strconv.FormatBool(c.Deflate),
}
if len(c.Name) > 0 {
tags["client_name"] = c.Name
}
fields := map[string]interface{}{
"ready_count": c.ReadyCount,
@@ -248,7 +265,7 @@ type ChannelStats struct {
}
type ClientStats struct {
Name string `json:"name"`
Name string `json:"name"` // DEPRECATED 1.x+, still here as the structs are currently being shared for parsing v3.x and 1.x
ID string `json:"client_id"`
Hostname string `json:"hostname"`
Version string `json:"version"`
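The gatherEndpoint change above boils down to parse-then-fallback: decode the body as the flat 1.0 layout first, and if no top-level version is present, re-decode it as the pre-1.0 `{"data": ...}` envelope. A condensed sketch with trimmed structs:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type statsData struct {
	Version string `json:"version"`
	Health  string `json:"health"`
}

// Pre-1.0 nsqd wraps the stats object in a {"data": ...} envelope.
type statsWrapper struct {
	Data statsData `json:"data"`
}

func parseStats(body []byte) (*statsData, error) {
	data := &statsData{}
	if err := json.Unmarshal(body, data); err != nil {
		return nil, err
	}
	// No top-level version: assume the old wrapped format.
	if len(data.Version) < 1 {
		wrapper := &statsWrapper{}
		if err := json.Unmarshal(body, wrapper); err != nil {
			return nil, err
		}
		data = &wrapper.Data
	}
	return data, nil
}

func main() {
	newFormat := []byte(`{"version":"1.0.0-compat","health":"OK"}`)
	oldFormat := []byte(`{"status_code":200,"data":{"version":"0.3.6","health":"OK"}}`)
	for _, b := range [][]byte{newFormat, oldFormat} {
		if d, err := parseStats(b); err == nil {
			fmt.Println(d.Version, d.Health)
		}
	}
}
```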

View File

@@ -12,10 +12,267 @@ import (
"github.com/stretchr/testify/require"
)
func TestNSQStats(t *testing.T) {
func TestNSQStatsV1(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, response)
fmt.Fprintln(w, responseV1)
}))
defer ts.Close()
n := &NSQ{
Endpoints: []string{ts.URL},
}
var acc testutil.Accumulator
err := acc.GatherError(n.Gather)
require.NoError(t, err)
u, err := url.Parse(ts.URL)
require.NoError(t, err)
host := u.Host
// actually validate the tests
tests := []struct {
m string
f map[string]interface{}
g map[string]string
}{
{
"nsq_server",
map[string]interface{}{
"server_count": int64(1),
"topic_count": int64(2),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(12),
"backend_depth": int64(13),
"message_count": int64(14),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(0),
"backend_depth": int64(1),
"inflight_count": int64(2),
"deferred_count": int64(3),
"message_count": int64(4),
"requeue_count": int64(5),
"timeout_count": int64(6),
"client_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t1",
"channel": "c1",
},
},
{
"nsq_client",
map[string]interface{}{
"ready_count": int64(200),
"inflight_count": int64(7),
"message_count": int64(8),
"finish_count": int64(9),
"requeue_count": int64(10),
},
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
"topic": "t1", "channel": "c1",
"client_id": "373a715cd990", "client_hostname": "373a715cd990",
"client_version": "V2", "client_address": "172.17.0.11:35560",
"client_tls": "false", "client_snappy": "false",
"client_deflate": "false",
"client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"},
},
{
"nsq_topic",
map[string]interface{}{
"depth": int64(28),
"backend_depth": int64(29),
"message_count": int64(30),
"channel_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t2"},
},
{
"nsq_channel",
map[string]interface{}{
"depth": int64(15),
"backend_depth": int64(16),
"inflight_count": int64(17),
"deferred_count": int64(18),
"message_count": int64(19),
"requeue_count": int64(20),
"timeout_count": int64(21),
"client_count": int64(1),
},
map[string]string{
"server_host": host,
"server_version": "1.0.0-compat",
"topic": "t2",
"channel": "c2",
},
},
{
"nsq_client",
map[string]interface{}{
"ready_count": int64(22),
"inflight_count": int64(23),
"message_count": int64(24),
"finish_count": int64(25),
"requeue_count": int64(26),
},
map[string]string{"server_host": host, "server_version": "1.0.0-compat",
"topic": "t2", "channel": "c2",
"client_id": "377569bd462b", "client_hostname": "377569bd462b",
"client_version": "V2", "client_address": "172.17.0.8:48145",
"client_user_agent": "go-nsq/1.0.5", "client_tls": "true",
"client_snappy": "true", "client_deflate": "true"},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, test.m, test.f, test.g)
}
}
// v1 version of localhost/stats?format=json response body
var responseV1 = `
{
"version": "1.0.0-compat",
"health": "OK",
"start_time": 1452021674,
"topics": [
{
"topic_name": "t1",
"channels": [
{
"channel_name": "c1",
"depth": 0,
"backend_depth": 1,
"in_flight_count": 2,
"deferred_count": 3,
"message_count": 4,
"requeue_count": 5,
"timeout_count": 6,
"clients": [
{
"client_id": "373a715cd990",
"hostname": "373a715cd990",
"version": "V2",
"remote_address": "172.17.0.11:35560",
"state": 3,
"ready_count": 200,
"in_flight_count": 7,
"message_count": 8,
"finish_count": 9,
"requeue_count": 10,
"connect_ts": 1452021675,
"sample_rate": 11,
"deflate": false,
"snappy": false,
"user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5",
"tls": false,
"tls_cipher_suite": "",
"tls_version": "",
"tls_negotiated_protocol": "",
"tls_negotiated_protocol_is_mutual": false
}
],
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
],
"depth": 12,
"backend_depth": 13,
"message_count": 14,
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
},
{
"topic_name": "t2",
"channels": [
{
"channel_name": "c2",
"depth": 15,
"backend_depth": 16,
"in_flight_count": 17,
"deferred_count": 18,
"message_count": 19,
"requeue_count": 20,
"timeout_count": 21,
"clients": [
{
"client_id": "377569bd462b",
"hostname": "377569bd462b",
"version": "V2",
"remote_address": "172.17.0.8:48145",
"state": 3,
"ready_count": 22,
"in_flight_count": 23,
"message_count": 24,
"finish_count": 25,
"requeue_count": 26,
"connect_ts": 1452021678,
"sample_rate": 27,
"deflate": true,
"snappy": true,
"user_agent": "go-nsq\/1.0.5",
"tls": true,
"tls_cipher_suite": "",
"tls_version": "",
"tls_negotiated_protocol": "",
"tls_negotiated_protocol_is_mutual": false
}
],
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
],
"depth": 28,
"backend_depth": 29,
"message_count": 30,
"paused": false,
"e2e_processing_latency": {
"count": 0,
"percentiles": null
}
}
]
}
`
// TestNSQStatsPreV1 is for backwards compatibility with nsq versions < 1.0
func TestNSQStatsPreV1(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, responsePreV1)
}))
defer ts.Close()
@@ -152,7 +409,7 @@ func TestNSQStats(t *testing.T) {
}
}
var response = `
var responsePreV1 = `
{
"status_code": 200,
"status_txt": "OK",

View File

@@ -69,7 +69,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
// Due to problems with parsing, we have to use a regular expression
// to remove strings that start with '(' and end with a space
// see: https://github.com/influxdata/telegraf/issues/2386
reg, err := regexp.Compile("\\([\\S]*")
reg, err := regexp.Compile("\\s+\\([\\S]*")
if err != nil {
return err
}
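The one-character-class difference matters: requiring leading whitespace before the '(' keeps refids like SHM(0) intact while still stripping truncated hostname columns. A quick comparison of both patterns against lines adapted from the test data:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldRe := regexp.MustCompile(`\([\S]*`)
	newRe := regexp.MustCompile(`\s+\([\S]*`)

	shm := "*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012"
	trunc := "+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101"

	fmt.Println(oldRe.ReplaceAllString(shm, ""))   // mangles the SHM remote
	fmt.Println(newRe.ReplaceAllString(shm, ""))   // leaves SHM(0) intact
	fmt.Println(newRe.ReplaceAllString(trunc, "")) // still strips "(domain)"
}
```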

View File

@@ -260,6 +260,57 @@ func TestParserNTPQ(t *testing.T) {
}
acc := testutil.Accumulator{}
assert.NoError(t, acc.GatherError(n.Gather))
fields := map[string]interface{}{
"poll": int64(64),
"when": int64(60),
"reach": int64(377),
"delay": float64(0.0),
"offset": float64(0.045),
"jitter": float64(1.012),
}
tags := map[string]string{
"remote": "SHM(0)",
"state_prefix": "*",
"refid": ".PPS.",
"stratum": "1",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
fields = map[string]interface{}{
"poll": int64(128),
"when": int64(121),
"reach": int64(377),
"delay": float64(0.0),
"offset": float64(10.105),
"jitter": float64(2.012),
}
tags = map[string]string{
"remote": "SHM(1)",
"state_prefix": "-",
"refid": ".GPS.",
"stratum": "1",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
fields = map[string]interface{}{
"poll": int64(1024),
"when": int64(10),
"reach": int64(377),
"delay": float64(1.748),
"offset": float64(0.373),
"jitter": float64(0.101),
}
tags = map[string]string{
"remote": "37.58.57.238",
"state_prefix": "+",
"refid": "192.53.103.103",
"stratum": "2",
"type": "u",
}
acc.AssertContainsTaggedFields(t, "ntpq", fields, tags)
}
func TestMultiNTPQ(t *testing.T) {
@@ -480,7 +531,9 @@ var multiNTPQ = ` remote refid st t when poll reach delay
`
var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter
==============================================================================
*SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012
+37.58.57.238 (d 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
+37.58.57.238 (domain) 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
+37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101
-SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012
`

View File

@@ -1022,7 +1022,7 @@ CREATE TABLE #PCounters
Primary Key(object_name, counter_name, instance_name)
);
INSERT #PCounters
SELECT RTrim(spi.object_name) object_name
SELECT DISTINCT RTrim(spi.object_name) object_name
, RTrim(spi.counter_name) counter_name
, RTrim(spi.instance_name) instance_name
, spi.cntr_value
@@ -1044,7 +1044,7 @@ CREATE TABLE #CCounters
Primary Key(object_name, counter_name, instance_name)
);
INSERT #CCounters
SELECT RTrim(spi.object_name) object_name
SELECT DISTINCT RTrim(spi.object_name) object_name
, RTrim(spi.counter_name) counter_name
, RTrim(spi.instance_name) instance_name
, spi.cntr_value

View File

@@ -101,8 +101,15 @@ func (rs *RunningStats) Percentile(n int) float64 {
}
i := int(float64(len(rs.perc)) * float64(n) / float64(100))
if i < 0 {
i = 0
}
return rs.perc[i]
return rs.perc[clamp(i, 0, len(rs.perc)-1)]
}
func clamp(i int, min int, max int) int {
if i < min {
return min
}
if i > max {
return max
}
return i
}
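The panic came from the index arithmetic: for n = 100 the computed index equals len(rs.perc), one past the end of the slice. Clamping keeps every percentile request in bounds, as this small worked example shows:

```go
package main

import "fmt"

func clamp(i, min, max int) int {
	if i < min {
		return min
	}
	if i > max {
		return max
	}
	return i
}

func main() {
	perc := []float64{5, 11, 32, 45} // sorted samples
	for _, n := range []int{0, 50, 90, 100} {
		i := int(float64(len(perc)) * float64(n) / float64(100))
		// Before the fix, n=100 gave i == len(perc) and a panic.
		fmt.Printf("p%d = %v\n", n, perc[clamp(i, 0, len(perc)-1)])
	}
}
```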

View File

@@ -23,12 +23,18 @@ func TestRunningStats_Single(t *testing.T) {
if rs.Lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
}
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
if rs.Percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
}
if rs.Percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
}
if rs.Count() != 1 {
t.Errorf("Expected %v, got %v", 1, rs.Count())
}
@@ -58,12 +64,18 @@ func TestRunningStats_Duplicate(t *testing.T) {
if rs.Lower() != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Lower())
}
if rs.Percentile(100) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100))
}
if rs.Percentile(90) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90))
}
if rs.Percentile(50) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(50))
}
if rs.Percentile(0) != 10.1 {
t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(0))
}
if rs.Count() != 4 {
t.Errorf("Expected %v, got %v", 4, rs.Count())
}
@@ -93,12 +105,18 @@ func TestRunningStats(t *testing.T) {
if rs.Lower() != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Lower())
}
if rs.Percentile(100) != 45 {
t.Errorf("Expected %v, got %v", 45, rs.Percentile(100))
}
if rs.Percentile(90) != 32 {
t.Errorf("Expected %v, got %v", 32, rs.Percentile(90))
}
if rs.Percentile(50) != 11 {
t.Errorf("Expected %v, got %v", 11, rs.Percentile(50))
}
if rs.Percentile(0) != 5 {
t.Errorf("Expected %v, got %v", 5, rs.Percentile(0))
}
if rs.Count() != 16 {
t.Errorf("Expected %v, got %v", 4, rs.Count())
}

View File

@@ -251,14 +251,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
}
for _, metric := range s.gauges {
acc.AddFields(metric.name, metric.fields, metric.tags, now)
acc.AddGauge(metric.name, metric.fields, metric.tags, now)
}
if s.DeleteGauges {
s.gauges = make(map[string]cachedgauge)
}
for _, metric := range s.counters {
acc.AddFields(metric.name, metric.fields, metric.tags, now)
acc.AddCounter(metric.name, metric.fields, metric.tags, now)
}
if s.DeleteCounters {
s.counters = make(map[string]cachedcounter)

View File

@@ -33,41 +33,48 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) {
tags := map[string]string{"pool": col[0], "health": col[8]}
fields := map[string]interface{}{}
size, err := strconv.ParseInt(col[1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
if tags["health"] == "UNAVAIL" {
alloc, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
fields["size"] = int64(0)
free, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
} else {
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
size, err := strconv.ParseInt(col[1], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing size: %s", err)
}
fields["size"] = size
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
alloc, err := strconv.ParseInt(col[2], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing allocation: %s", err)
}
fields["allocated"] = alloc
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
free, err := strconv.ParseInt(col[3], 10, 64)
if err != nil {
return "", fmt.Errorf("Error parsing free: %s", err)
}
fields["free"] = free
frag, err := strconv.ParseInt(strings.TrimSuffix(col[5], "%"), 10, 0)
if err != nil { // This might be - for RO devs
frag = 0
}
fields["fragmentation"] = frag
capval, err := strconv.ParseInt(col[6], 10, 0)
if err != nil {
return "", fmt.Errorf("Error parsing capacity: %s", err)
}
fields["capacity"] = capval
dedup, err := strconv.ParseFloat(strings.TrimSuffix(col[7], "x"), 32)
if err != nil {
return "", fmt.Errorf("Error parsing dedupratio: %s", err)
}
fields["dedupratio"] = dedup
}
fields["dedupratio"] = dedup
acc.AddFields("zfs_pool", fields, tags)
}
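`zpool list -Hp` prints "-" in every numeric column when a pool is UNAVAIL, which is why the unconditional strconv.ParseInt calls used to fail. A sketch of the branch against the mock line used in the tests below:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "temp2 - - - - - - - UNAVAIL -"
	col := strings.Fields(line)

	fields := map[string]interface{}{}
	if col[8] == "UNAVAIL" {
		fields["size"] = int64(0) // numeric columns are all "-"
	} else {
		size, err := strconv.ParseInt(col[1], 10, 64)
		if err != nil {
			panic(err)
		}
		fields["size"] = size
	}
	fmt.Println(col[0], fields)
}
```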

View File

@@ -22,6 +22,15 @@ func mock_zpool() ([]string, error) {
return zpool_output, nil
}
// $ zpool list -Hp
var zpool_output_unavail = []string{
"temp2 - - - - - - - UNAVAIL -",
}
func mock_zpool_unavail() ([]string, error) {
return zpool_output_unavail, nil
}
// sysctl -q kstat.zfs.misc.arcstats
// sysctl -q kstat.zfs.misc.vdev_cache_stats
@@ -82,6 +91,41 @@ func TestZfsPoolMetrics(t *testing.T) {
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
}
func TestZfsPoolMetrics_unavail(t *testing.T) {
var acc testutil.Accumulator
z := &Zfs{
KstatMetrics: []string{"vdev_cache_stats"},
sysctl: mock_sysctl,
zpool: mock_zpool_unavail,
}
err := z.Gather(&acc)
require.NoError(t, err)
require.False(t, acc.HasMeasurement("zfs_pool"))
acc.Metrics = nil
z = &Zfs{
KstatMetrics: []string{"vdev_cache_stats"},
PoolMetrics: true,
sysctl: mock_sysctl,
zpool: mock_zpool_unavail,
}
err = z.Gather(&acc)
require.NoError(t, err)
// one pool, UNAVAIL
tags := map[string]string{
"pool": "temp2",
"health": "UNAVAIL",
}
poolMetrics := getTemp2PoolMetrics()
acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags)
}
func TestZfsGeneratesMetrics(t *testing.T) {
var acc testutil.Accumulator
@@ -128,6 +172,12 @@ func getFreeNasBootPoolMetrics() map[string]interface{} {
}
}
func getTemp2PoolMetrics() map[string]interface{} {
return map[string]interface{}{
"size": int64(0),
}
}
func getKstatMetricsVdevOnly() map[string]interface{} {
return map[string]interface{}{
"vdev_cache_stats_misses": int64(87789),

View File

@@ -193,6 +193,25 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
continue
}
// Do CloudWatch boundary checking
// Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
if math.IsNaN(value) {
datums = datums[:len(datums)-1]
continue
}
if math.IsInf(value, 0) {
datums = datums[:len(datums)-1]
continue
}
if value > 0 && value < float64(8.515920e-109) {
datums = datums[:len(datums)-1]
continue
}
if value > float64(1.174271e+108) {
datums = datums[:len(datums)-1]
continue
}
datums[i] = &cloudwatch.MetricDatum{
MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")),
Value: aws.Float64(value),
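Condensed, the boundary checks reject the four cases CloudWatch refuses: NaN, ±Inf, positive values below 8.515920e-109, and values above 1.174271e+108 (zero is still accepted). A standalone predicate mirroring the checks above:

```go
package main

import (
	"fmt"
	"math"
)

func withinCloudWatchBounds(value float64) bool {
	if math.IsNaN(value) || math.IsInf(value, 0) {
		return false
	}
	if value > 0 && value < 8.515920e-109 {
		return false
	}
	if value > 1.174271e+108 {
		return false
	}
	return true
}

func main() {
	for _, v := range []float64{1, 0, math.NaN(), math.Inf(1), 8.515919e-109, 1.174272e+108} {
		fmt.Printf("%v -> %v\n", v, withinCloudWatchBounds(v))
	}
}
```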

View File

@@ -1,6 +1,8 @@
package cloudwatch
import (
"fmt"
"math"
"sort"
"testing"
@@ -51,22 +53,32 @@ func TestBuildDimensions(t *testing.T) {
func TestBuildMetricDatums(t *testing.T) {
assert := assert.New(t)
zero := 0.0
validMetrics := []telegraf.Metric{
testutil.TestMetric(1),
testutil.TestMetric(int32(1)),
testutil.TestMetric(int64(1)),
testutil.TestMetric(float64(1)),
testutil.TestMetric(float64(0)),
testutil.TestMetric(math.Copysign(zero, -1)), // the CW documentation does not call out -0 as rejected
testutil.TestMetric(float64(8.515920e-109)),
testutil.TestMetric(float64(1.174271e+108)), // largest should be 1.174271e+108
testutil.TestMetric(true),
}
invalidMetrics := []telegraf.Metric{
testutil.TestMetric("Foo"),
testutil.TestMetric(math.Log(-1.0)),
testutil.TestMetric(float64(8.515919e-109)), // smallest should be 8.515920e-109
testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108
}
for _, point := range validMetrics {
datums := BuildMetricDatum(point)
assert.Equal(1, len(datums), "Valid type should create a Datum")
assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point))
}
for _, point := range invalidMetrics {
datums := BuildMetricDatum(point)
assert.Equal(0, len(datums), fmt.Sprintf("Invalid point should not create a Datum {value: %v}", point))
}
nonValidPoint := testutil.TestMetric("Foo")
assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
}
func TestPartitionDatums(t *testing.T) {
@@ -78,10 +90,13 @@ func TestPartitionDatums(t *testing.T) {
Value: aws.Float64(1),
}
zeroDatum := []*cloudwatch.MetricDatum{}
oneDatum := []*cloudwatch.MetricDatum{&testDatum}
twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum}
threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum}
assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum))
assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum))
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum))
assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum))

View File

@@ -44,6 +44,9 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
```
@@ -70,4 +73,5 @@ to write to. Each URL should start with either `http://` or `udp://`
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `http_proxy`: HTTP Proxy URI
* `http_headers`: HTTP headers to add to each HTTP request
* `content_encoding`: Compress each HTTP request payload using gzip if set to: "gzip"

View File

@@ -68,6 +68,8 @@ func NewHTTP(config HTTPConfig, defaultWP WriteParams) (Client, error) {
}, nil
}
type HTTPHeaders map[string]string
type HTTPConfig struct {
// URL should be of the form "http://host:port" (REQUIRED)
URL string
@@ -95,6 +97,9 @@ type HTTPConfig struct {
// Proxy URL should be of the form "http://host:port"
HTTPProxy string
// HTTP headers to append to HTTP requests.
HTTPHeaders HTTPHeaders
// The content encoding mechanism to use for each request.
ContentEncoding string
}
@@ -253,6 +258,11 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
if err != nil {
return nil, err
}
for header, value := range c.config.HTTPHeaders {
req.Header.Set(header, value)
}
req.Header.Set("Content-Type", "text/plain")
req.Header.Set("User-Agent", c.config.UserAgent)
if c.config.Username != "" && c.config.Password != "" {
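The client change is a straightforward loop over the configured map before the standard headers are set. A minimal sketch of the same idea (the URL and header name are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	headers := map[string]string{"X-Special-Header": "Special-Value"}

	req, err := http.NewRequest("POST",
		"http://localhost:8086/write", strings.NewReader("cpu value=1"))
	if err != nil {
		panic(err)
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	fmt.Println(req.Header.Get("X-Special-Header"))
}
```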

View File

@@ -55,6 +55,13 @@ func TestHTTPClient_Write(t *testing.T) {
fmt.Fprintln(w, `{"results":[{}],"error":"basic auth incorrect"}`)
}
// test that user-specified http header is set properly
if r.Header.Get("X-Test-Header") != "Test-Value" {
w.WriteHeader(http.StatusTeapot)
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, `{"results":[{}],"error":"wrong http header value"}`)
}
// Validate Content-Length Header
if r.ContentLength != 13 {
w.WriteHeader(http.StatusTeapot)
@@ -90,6 +97,9 @@ func TestHTTPClient_Write(t *testing.T) {
UserAgent: "test-agent",
Username: "test-user",
Password: "test-password",
HTTPHeaders: HTTPHeaders{
"X-Test-Header": "Test-Value",
},
}
wp := WriteParams{
Database: "test",

View File

@@ -32,9 +32,10 @@ type InfluxDB struct {
RetentionPolicy string
WriteConsistency string
Timeout internal.Duration
UDPPayload int `toml:"udp_payload"`
HTTPProxy string `toml:"http_proxy"`
ContentEncoding string `toml:"content_encoding"`
UDPPayload int `toml:"udp_payload"`
HTTPProxy string `toml:"http_proxy"`
HTTPHeaders map[string]string `toml:"http_headers"`
ContentEncoding string `toml:"content_encoding"`
// Path to CA file
SSLCA string `toml:"ssl_ca"`
@@ -89,6 +90,9 @@ var sampleConfig = `
## HTTP Proxy Config
# http_proxy = "http://corporate.proxy:3128"
## Optional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## Compress each HTTP request payload using GZIP.
# content_encoding = "gzip"
`
@@ -132,8 +136,12 @@ func (i *InfluxDB) Connect() error {
Username: i.Username,
Password: i.Password,
HTTPProxy: i.HTTPProxy,
HTTPHeaders: client.HTTPHeaders{},
ContentEncoding: i.ContentEncoding,
}
for header, value := range i.HTTPHeaders {
config.HTTPHeaders[header] = value
}
wp := client.WriteParams{
Database: i.Database,
RetentionPolicy: i.RetentionPolicy,

View File

@@ -5,6 +5,7 @@ import (
"log"
"net"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
@@ -13,6 +14,16 @@ import (
"github.com/influxdata/telegraf/plugins/outputs"
)
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
hypenChars = strings.NewReplacer(
"@", "-",
"*", "-",
`%`, "-",
"#", "-",
"$", "-")
)
type OpenTSDB struct {
Prefix string
@@ -24,9 +35,6 @@ type OpenTSDB struct {
Debug bool
}
var sanitizedChars = strings.NewReplacer("@", "-", "*", "-", " ", "_",
`%`, "-", "#", "-", "$", "-", ":", "_")
var sampleConfig = `
## prefix for metrics keys
prefix = "my.specific.prefix."
@@ -125,8 +133,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
}
metric := &HttpMetric{
Metric: sanitizedChars.Replace(fmt.Sprintf("%s%s_%s",
o.Prefix, m.Name(), fieldName)),
Metric: sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
Tags: tags,
Timestamp: now,
Value: value,
@@ -176,7 +183,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
}
messageLine := fmt.Sprintf("put %s %v %s %s\n",
sanitizedChars.Replace(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
sanitize(fmt.Sprintf("%s%s_%s", o.Prefix, m.Name(), fieldName)),
now, metricValue, tags)
_, err := connection.Write([]byte(messageLine))
@@ -192,7 +199,7 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
func cleanTags(tags map[string]string) map[string]string {
tagSet := make(map[string]string, len(tags))
for k, v := range tags {
tagSet[sanitizedChars.Replace(k)] = sanitizedChars.Replace(v)
tagSet[sanitize(k)] = sanitize(v)
}
return tagSet
}
@@ -236,6 +243,13 @@ func (o *OpenTSDB) Close() error {
return nil
}
func sanitize(value string) string {
// Apply special hyphenation rules to preserve backwards compatibility
value = hypenChars.Replace(value)
// Replace any remaining illegal chars
return allowedChars.ReplaceAllLiteralString(value, "_")
}
func init() {
outputs.Add("opentsdb", func() telegraf.Output {
return &OpenTSDB{}

View File

@@ -10,9 +10,10 @@ import (
"strconv"
"testing"
"github.com/stretchr/testify/require"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil"
//"github.com/stretchr/testify/require"
)
func TestCleanTags(t *testing.T) {
@@ -29,8 +30,16 @@ func TestCleanTags(t *testing.T) {
map[string]string{"aaa": "bbb"},
},
{
map[string]string{"Sp%ci@l Chars": "g$t repl#ced"},
map[string]string{"Sp-ci-l_Chars": "g-t_repl-ced"},
map[string]string{"Sp%ci@l Chars[": "g$t repl#ce)d"},
map[string]string{"Sp-ci-l_Chars_": "g-t_repl-ce_d"},
},
{
map[string]string{"μnicodε_letters": "okαy"},
map[string]string{"μnicodε_letters": "okαy"},
},
{
map[string]string{"n☺": "emojies☠"},
map[string]string{"n_": "emojies_"},
},
{
map[string]string{},
@@ -75,6 +84,47 @@ func TestBuildTagsTelnet(t *testing.T) {
}
}
func TestSanitize(t *testing.T) {
tests := []struct {
name string
value string
expected string
}{
{
name: "Ascii letters and numbers allowed",
value: "ascii 123",
expected: "ascii_123",
},
{
name: "Allowed punct",
value: "-_./",
expected: "-_./",
},
{
name: "Special conversions to hyphen",
value: "@*%#$!",
expected: "-----_",
},
{
name: "Unicode Letters allowed",
value: "μnicodε_letters",
expected: "μnicodε_letters",
},
{
name: "Other Unicode not allowed",
value: "“☢”",
expected: "___",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := sanitize(tt.value)
require.Equal(t, tt.expected, actual)
})
}
}
func BenchmarkHttpSend(b *testing.B) {
const BatchSize = 50
const MetricsCount = 4 * BatchSize

View File

@@ -32,13 +32,22 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) {
}
for fieldName, value := range metric.Fields() {
// Convert value to string
valueS := fmt.Sprintf("%#v", value)
point := []byte(fmt.Sprintf("%s %s %d\n",
switch v := value.(type) {
case string:
continue
case bool:
if v {
value = 1
} else {
value = 0
}
}
metricString := fmt.Sprintf("%s %#v %d\n",
// insert "field" section of template
sanitizedChars.Replace(InsertField(bucket, fieldName)),
sanitizedChars.Replace(valueS),
timestamp))
value,
timestamp)
point := []byte(metricString)
out = append(out, point...)
}
return out, nil
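The serializer change replaces blind %#v formatting with a type switch: string fields are dropped entirely and booleans are coerced to 1/0 so graphite always receives a number. A toy rendering loop with the same rules (the bucket template and timestamp are invented for the example):

```go
package main

import "fmt"

func main() {
	fields := map[string]interface{}{
		"enabled": true,
		"status":  "ok", // skipped: graphite has no string values
		"load":    0.64,
	}
	for name, value := range fields {
		switch v := value.(type) {
		case string:
			continue
		case bool:
			if v {
				value = 1
			} else {
				value = 0
			}
		}
		fmt.Printf("localhost.%s.cpu %#v 1455320690\n", name, value)
	}
}
```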

View File

@@ -165,6 +165,58 @@ func TestSerializeValueField2(t *testing.T) {
assert.Equal(t, expS, mS)
}
func TestSerializeValueString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"cpu": "cpu0",
"datacenter": "us-west-2",
}
fields := map[string]interface{}{
"value": "asdasd",
}
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
s := GraphiteSerializer{
Template: "host.field.tags.measurement",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
assert.NoError(t, err)
assert.Equal(t, "", mS[0])
}
func TestSerializeValueBoolean(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"cpu": "cpu0",
"datacenter": "us-west-2",
}
fields := map[string]interface{}{
"enabled": true,
"disabled": false,
}
m, err := metric.New("cpu", tags, fields, now)
assert.NoError(t, err)
s := GraphiteSerializer{
Template: "host.field.tags.measurement",
}
buf, _ := s.Serialize(m)
mS := strings.Split(strings.TrimSpace(string(buf)), "\n")
assert.NoError(t, err)
expS := []string{
fmt.Sprintf("localhost.enabled.cpu0.us-west-2.cpu 1 %d", now.Unix()),
fmt.Sprintf("localhost.disabled.cpu0.us-west-2.cpu 0 %d", now.Unix()),
}
sort.Strings(mS)
sort.Strings(expS)
assert.Equal(t, expS, mS)
}
// test that fields with spaces get fixed.
func TestSerializeFieldWithSpaces(t *testing.T) {
now := time.Now()

View File

@@ -274,6 +274,8 @@ def get_system_arch():
arch = "amd64"
elif arch == "386":
arch = "i386"
elif "arm64" in arch:
arch = "arm64"
elif 'arm' in arch:
# Prevent uname from reporting full ARM arch (eg 'armv7l')
arch = "arm"
@@ -446,6 +448,8 @@ def build(version=None,
# Handle variations in architecture output
if arch == "i386" or arch == "i686":
arch = "386"
elif "arm64" in arch:
arch = "arm64"
elif "arm" in arch:
arch = "arm"
build_command += "GOOS={} GOARCH={} ".format(platform, arch)