Compare commits
14 Commits
1.0.0-beta
...
1.0.0-beta
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2beef21231 | ||
|
|
cb3c54a1ae | ||
|
|
d50a1e83ac | ||
|
|
1f10639222 | ||
|
|
af0979cce5 | ||
|
|
5b43901bd8 | ||
|
|
d7efb7a71d | ||
|
|
4d242836ee | ||
|
|
06cb5a041e | ||
|
|
ea2521bf27 | ||
|
|
4cd1f7a104 | ||
|
|
137843b2f6 | ||
|
|
008ed17a79 | ||
|
|
75e6cb9064 |
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -11,6 +11,8 @@ Erase the other section and everything on and above this line.
|
|||||||
|
|
||||||
## Bug report
|
## Bug report
|
||||||
|
|
||||||
|
### Relevant telegraf.conf:
|
||||||
|
|
||||||
### System info:
|
### System info:
|
||||||
|
|
||||||
[Include Telegraf version, operating system name, and other relevant details]
|
[Include Telegraf version, operating system name, and other relevant details]
|
||||||
|
|||||||
24
CHANGELOG.md
24
CHANGELOG.md
@@ -1,3 +1,27 @@
|
|||||||
|
## v1.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
## v1.0 beta 2 [2016-06-21]
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- [#1340](https://github.com/influxdata/telegraf/issues/1340): statsd: do not log every dropped metric.
|
||||||
|
- [#1368](https://github.com/influxdata/telegraf/pull/1368): Add precision rounding to all metrics on collection.
|
||||||
|
- [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine
|
||||||
|
- [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns.
|
||||||
|
|
||||||
|
### Bugfixes
|
||||||
|
|
||||||
|
- [#1330](https://github.com/influxdata/telegraf/issues/1330): Fix exec plugin panic when using single binary.
|
||||||
|
- [#1336](https://github.com/influxdata/telegraf/issues/1336): Fixed incorrect prometheus metrics source selection.
|
||||||
|
- [#1112](https://github.com/influxdata/telegraf/issues/1112): Set default Zookeeper chroot to empty string.
|
||||||
|
- [#1335](https://github.com/influxdata/telegraf/issues/1335): Fix overall ping timeout to be calculated based on per-ping timeout.
|
||||||
|
- [#1374](https://github.com/influxdata/telegraf/pull/1374): Change "default" retention policy to "".
|
||||||
|
- [#1377](https://github.com/influxdata/telegraf/issues/1377): Graphite output mangling '%' character.
|
||||||
|
|
||||||
## v1.0 beta 1 [2016-06-07]
|
## v1.0 beta 1 [2016-06-07]
|
||||||
|
|
||||||
### Release Notes
|
### Release Notes
|
||||||
|
|||||||
1
Godeps
1
Godeps
@@ -47,6 +47,7 @@ github.com/shirou/gopsutil 586bb697f3ec9f8ec08ffefe18f521a64534037c
|
|||||||
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
|
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
|
||||||
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
||||||
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
|
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
|
||||||
|
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
|
||||||
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
|
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
|
||||||
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
||||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||||
|
|||||||
18
README.md
18
README.md
@@ -20,12 +20,12 @@ new plugins.
|
|||||||
### Linux deb and rpm Packages:
|
### Linux deb and rpm Packages:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_amd64.deb
|
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_amd64.deb
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.x86_64.rpm
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.x86_64.rpm
|
||||||
|
|
||||||
Latest (arm):
|
Latest (arm):
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta1_armhf.deb
|
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0-beta2_armhf.deb
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta1.armhf.rpm
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_beta2.armhf.rpm
|
||||||
|
|
||||||
##### Package Instructions:
|
##### Package Instructions:
|
||||||
|
|
||||||
@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.
|
|||||||
### Linux tarballs:
|
### Linux tarballs:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_amd64.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_amd64.tar.gz
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_i386.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_i386.tar.gz
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_linux_armhf.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_linux_armhf.tar.gz
|
||||||
|
|
||||||
### FreeBSD tarball:
|
### FreeBSD tarball:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_freebsd_amd64.tar.gz
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_freebsd_amd64.tar.gz
|
||||||
|
|
||||||
### Ansible Role:
|
### Ansible Role:
|
||||||
|
|
||||||
@@ -69,7 +69,7 @@ brew install telegraf
|
|||||||
### Windows Binaries (EXPERIMENTAL)
|
### Windows Binaries (EXPERIMENTAL)
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta1_windows_amd64.zip
|
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0-beta2_windows_amd64.zip
|
||||||
|
|
||||||
### From Source:
|
### From Source:
|
||||||
|
|
||||||
|
|||||||
@@ -18,4 +18,8 @@ type Accumulator interface {
|
|||||||
|
|
||||||
Debug() bool
|
Debug() bool
|
||||||
SetDebug(enabled bool)
|
SetDebug(enabled bool)
|
||||||
|
|
||||||
|
SetPrecision(precision, interval time.Duration)
|
||||||
|
|
||||||
|
DisablePrecision()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ func NewAccumulator(
|
|||||||
acc := accumulator{}
|
acc := accumulator{}
|
||||||
acc.metrics = metrics
|
acc.metrics = metrics
|
||||||
acc.inputConfig = inputConfig
|
acc.inputConfig = inputConfig
|
||||||
|
acc.precision = time.Nanosecond
|
||||||
return &acc
|
return &acc
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,6 +33,8 @@ type accumulator struct {
|
|||||||
inputConfig *internal_models.InputConfig
|
inputConfig *internal_models.InputConfig
|
||||||
|
|
||||||
prefix string
|
prefix string
|
||||||
|
|
||||||
|
precision time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) Add(
|
func (ac *accumulator) Add(
|
||||||
@@ -141,6 +144,7 @@ func (ac *accumulator) AddFields(
|
|||||||
} else {
|
} else {
|
||||||
timestamp = time.Now()
|
timestamp = time.Now()
|
||||||
}
|
}
|
||||||
|
timestamp = timestamp.Round(ac.precision)
|
||||||
|
|
||||||
if ac.prefix != "" {
|
if ac.prefix != "" {
|
||||||
measurement = ac.prefix + measurement
|
measurement = ac.prefix + measurement
|
||||||
@@ -173,6 +177,31 @@ func (ac *accumulator) SetTrace(trace bool) {
|
|||||||
ac.trace = trace
|
ac.trace = trace
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetPrecision takes two time.Duration objects. If the first is non-zero,
|
||||||
|
// it sets that as the precision. Otherwise, it takes the second argument
|
||||||
|
// as the order of time that the metrics should be rounded to, with the
|
||||||
|
// maximum being 1s.
|
||||||
|
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
|
||||||
|
if precision > 0 {
|
||||||
|
ac.precision = precision
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case interval >= time.Second:
|
||||||
|
ac.precision = time.Second
|
||||||
|
case interval >= time.Millisecond:
|
||||||
|
ac.precision = time.Millisecond
|
||||||
|
case interval >= time.Microsecond:
|
||||||
|
ac.precision = time.Microsecond
|
||||||
|
default:
|
||||||
|
ac.precision = time.Nanosecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) DisablePrecision() {
|
||||||
|
ac.precision = time.Nanosecond
|
||||||
|
}
|
||||||
|
|
||||||
func (ac *accumulator) setDefaultTags(tags map[string]string) {
|
func (ac *accumulator) setDefaultTags(tags map[string]string) {
|
||||||
ac.defaultTags = tags
|
ac.defaultTags = tags
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -38,6 +38,128 @@ func TestAdd(t *testing.T) {
|
|||||||
actual)
|
actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddNoPrecisionWithInterval(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddNoIntervalWithPrecision(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(time.Second, time.Millisecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddDisablePrecision(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(time.Second, time.Millisecond)
|
||||||
|
a.DisablePrecision()
|
||||||
|
a.Add("acctest", float64(101), map[string]string{})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"})
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDifferentPrecisions(t *testing.T) {
|
||||||
|
a := accumulator{}
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
a.metrics = make(chan telegraf.Metric, 10)
|
||||||
|
defer close(a.metrics)
|
||||||
|
a.inputConfig = &internal_models.InputConfig{}
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm := <-a.metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Millisecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Microsecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Nanosecond)
|
||||||
|
a.Add("acctest", float64(101), map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-a.metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
func TestAddDefaultTags(t *testing.T) {
|
func TestAddDefaultTags(t *testing.T) {
|
||||||
a := accumulator{}
|
a := accumulator{}
|
||||||
a.addDefaultTag("default", "tag")
|
a.addDefaultTag("default", "tag")
|
||||||
|
|||||||
@@ -118,6 +118,8 @@ func (a *Agent) gatherer(
|
|||||||
|
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
||||||
|
a.Config.Agent.Interval.Duration)
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
|
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
|
||||||
@@ -201,6 +203,8 @@ func (a *Agent) Test() error {
|
|||||||
for _, input := range a.Config.Inputs {
|
for _, input := range a.Config.Inputs {
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetTrace(true)
|
acc.SetTrace(true)
|
||||||
|
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
||||||
|
a.Config.Agent.Interval.Duration)
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
||||||
@@ -289,6 +293,9 @@ func (a *Agent) Run(shutdown chan struct{}) error {
|
|||||||
case telegraf.ServiceInput:
|
case telegraf.ServiceInput:
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
acc := NewAccumulator(input.Config, metricC)
|
||||||
acc.SetDebug(a.Config.Agent.Debug)
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
// Service input plugins should set their own precision of their
|
||||||
|
// metrics.
|
||||||
|
acc.DisablePrecision()
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
acc.setDefaultTags(a.Config.Tags)
|
||||||
if err := p.Start(acc); err != nil {
|
if err := p.Start(acc); err != nil {
|
||||||
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
log.Printf("Service for input %s failed to start, exiting\n%s\n",
|
||||||
|
|||||||
@@ -52,6 +52,11 @@
|
|||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||||
flush_jitter = "0s"
|
flush_jitter = "0s"
|
||||||
|
|
||||||
|
## By default, precision will be set to the same timestamp order as the
|
||||||
|
## collection interval, with the maximum being 1s.
|
||||||
|
## Precision will NOT be used for service inputs, such as logparser and statsd.
|
||||||
|
## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
|
||||||
|
precision = ""
|
||||||
## Run telegraf in debug mode
|
## Run telegraf in debug mode
|
||||||
debug = false
|
debug = false
|
||||||
## Run telegraf in quiet mode
|
## Run telegraf in quiet mode
|
||||||
@@ -75,12 +80,9 @@
|
|||||||
urls = ["http://localhost:8086"] # required
|
urls = ["http://localhost:8086"] # required
|
||||||
## The target database for metrics (telegraf will create it if not exists).
|
## The target database for metrics (telegraf will create it if not exists).
|
||||||
database = "telegraf" # required
|
database = "telegraf" # required
|
||||||
## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
|
||||||
## note: using "s" precision greatly improves InfluxDB compression.
|
|
||||||
precision = "s"
|
|
||||||
|
|
||||||
## Retention policy to write to.
|
## Retention policy to write to. Empty string writes to the default rp.
|
||||||
retention_policy = "default"
|
retention_policy = ""
|
||||||
## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
|
## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
|
||||||
write_consistency = "any"
|
write_consistency = "any"
|
||||||
|
|
||||||
@@ -1138,7 +1140,7 @@
|
|||||||
# count = 1 # required
|
# count = 1 # required
|
||||||
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
||||||
# ping_interval = 0.0
|
# ping_interval = 0.0
|
||||||
# ## ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
||||||
# timeout = 1.0
|
# timeout = 1.0
|
||||||
# ## interface to send ping from (ping -I <INTERFACE>)
|
# ## interface to send ping from (ping -I <INTERFACE>)
|
||||||
# interface = ""
|
# interface = ""
|
||||||
@@ -1501,7 +1503,7 @@
|
|||||||
# ## an array of Zookeeper connection strings
|
# ## an array of Zookeeper connection strings
|
||||||
# zookeeper_peers = ["localhost:2181"]
|
# zookeeper_peers = ["localhost:2181"]
|
||||||
# ## Zookeeper Chroot
|
# ## Zookeeper Chroot
|
||||||
# zookeeper_chroot = "/"
|
# zookeeper_chroot = ""
|
||||||
# ## the name of the consumer group
|
# ## the name of the consumer group
|
||||||
# consumer_group = "telegraf_metrics_consumers"
|
# consumer_group = "telegraf_metrics_consumers"
|
||||||
# ## Offset (must be either "oldest" or "newest")
|
# ## Offset (must be either "oldest" or "newest")
|
||||||
@@ -1514,6 +1516,35 @@
|
|||||||
# data_format = "influx"
|
# data_format = "influx"
|
||||||
|
|
||||||
|
|
||||||
|
# # Stream and parse log file(s).
|
||||||
|
# [[inputs.logparser]]
|
||||||
|
# ## Log files to parse.
|
||||||
|
# ## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
# ## ** as a "super asterisk". ie:
|
||||||
|
# ## /var/log/**.log -> recursively find all .log files in /var/log
|
||||||
|
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
|
# ## /var/log/apache.log -> only tail the apache log file
|
||||||
|
# files = ["/var/log/influxdb/influxdb.log"]
|
||||||
|
# ## Read file from beginning.
|
||||||
|
# from_beginning = false
|
||||||
|
#
|
||||||
|
# ## Parse logstash-style "grok" patterns:
|
||||||
|
# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
|
||||||
|
# [inputs.logparser.grok]
|
||||||
|
# ## This is a list of patterns to check the given log file(s) for.
|
||||||
|
# ## Note that adding patterns here increases processing time. The most
|
||||||
|
# ## efficient configuration is to have one pattern per logparser.
|
||||||
|
# ## Other common built-in patterns are:
|
||||||
|
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
|
||||||
|
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
|
||||||
|
# patterns = ["%{INFLUXDB_HTTPD_LOG}"]
|
||||||
|
# ## Full path(s) to custom pattern files.
|
||||||
|
# custom_pattern_files = []
|
||||||
|
# ## Custom patterns can also be defined here. Put one pattern per line.
|
||||||
|
# custom_patterns = '''
|
||||||
|
# '''
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics from MQTT topic(s)
|
# # Read metrics from MQTT topic(s)
|
||||||
# [[inputs.mqtt_consumer]]
|
# [[inputs.mqtt_consumer]]
|
||||||
# servers = ["localhost:1883"]
|
# servers = ["localhost:1883"]
|
||||||
|
|||||||
79
filter/filter.go
Normal file
79
filter/filter.go
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
package filter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gobwas/glob"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Filter interface {
|
||||||
|
Match(string) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompileFilter takes a list of string filters and returns a Filter interface
|
||||||
|
// for matching a given string against the filter list. The filter list
|
||||||
|
// supports glob matching too, ie:
|
||||||
|
//
|
||||||
|
// f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
// f.Match("cpu") // true
|
||||||
|
// f.Match("network") // true
|
||||||
|
// f.Match("memory") // false
|
||||||
|
//
|
||||||
|
func CompileFilter(filters []string) (Filter, error) {
|
||||||
|
// return if there is nothing to compile
|
||||||
|
if len(filters) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if we can compile a non-glob filter
|
||||||
|
noGlob := true
|
||||||
|
for _, filter := range filters {
|
||||||
|
if hasMeta(filter) {
|
||||||
|
noGlob = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case noGlob:
|
||||||
|
// return non-globbing filter if not needed.
|
||||||
|
return compileFilterNoGlob(filters), nil
|
||||||
|
case len(filters) == 1:
|
||||||
|
return glob.Compile(filters[0])
|
||||||
|
default:
|
||||||
|
return glob.Compile("{" + strings.Join(filters, ",") + "}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasMeta reports whether path contains any magic glob characters.
|
||||||
|
func hasMeta(s string) bool {
|
||||||
|
return strings.IndexAny(s, "*?[") >= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type filter struct {
|
||||||
|
m map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filter) Match(s string) bool {
|
||||||
|
_, ok := f.m[s]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
type filtersingle struct {
|
||||||
|
s string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *filtersingle) Match(s string) bool {
|
||||||
|
return f.s == s
|
||||||
|
}
|
||||||
|
|
||||||
|
func compileFilterNoGlob(filters []string) Filter {
|
||||||
|
if len(filters) == 1 {
|
||||||
|
return &filtersingle{s: filters[0]}
|
||||||
|
}
|
||||||
|
out := filter{m: make(map[string]struct{})}
|
||||||
|
for _, filter := range filters {
|
||||||
|
out.m[filter] = struct{}{}
|
||||||
|
}
|
||||||
|
return &out
|
||||||
|
}
|
||||||
96
filter/filter_test.go
Normal file
96
filter/filter_test.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
package filter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCompileFilter(t *testing.T) {
|
||||||
|
f, err := CompileFilter([]string{})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Nil(t, f)
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.False(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu*"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.True(t, f.Match("cpu0"))
|
||||||
|
assert.False(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu", "mem"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.True(t, f.Match("mem"))
|
||||||
|
|
||||||
|
f, err = CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, f.Match("cpu"))
|
||||||
|
assert.False(t, f.Match("cpu0"))
|
||||||
|
assert.True(t, f.Match("mem"))
|
||||||
|
assert.True(t, f.Match("network"))
|
||||||
|
}
|
||||||
|
|
||||||
|
var benchbool bool
|
||||||
|
|
||||||
|
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("cpu")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu", "mem", "net*"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilterNoGlob(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"cpu", "mem", "net"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("net")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter2(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
||||||
|
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("network")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkFilter2NoGlob(b *testing.B) {
|
||||||
|
f, _ := CompileFilter([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
||||||
|
"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
|
||||||
|
var tmp bool
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
tmp = f.Match("net")
|
||||||
|
}
|
||||||
|
benchbool = tmp
|
||||||
|
}
|
||||||
@@ -77,6 +77,14 @@ type AgentConfig struct {
|
|||||||
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
|
||||||
RoundInterval bool
|
RoundInterval bool
|
||||||
|
|
||||||
|
// By default, precision will be set to the same timestamp order as the
|
||||||
|
// collection interval, with the maximum being 1s.
|
||||||
|
// ie, when interval = "10s", precision will be "1s"
|
||||||
|
// when interval = "250ms", precision will be "1ms"
|
||||||
|
// Precision will NOT be used for service inputs. It is up to each individual
|
||||||
|
// service input to set the timestamp at the appropriate precision.
|
||||||
|
Precision internal.Duration
|
||||||
|
|
||||||
// CollectionJitter is used to jitter the collection by a random amount.
|
// CollectionJitter is used to jitter the collection by a random amount.
|
||||||
// Each plugin will sleep for a random time within jitter before collecting.
|
// Each plugin will sleep for a random time within jitter before collecting.
|
||||||
// This can be used to avoid many plugins querying things like sysfs at the
|
// This can be used to avoid many plugins querying things like sysfs at the
|
||||||
@@ -108,11 +116,10 @@ type AgentConfig struct {
|
|||||||
// does _not_ deactivate FlushInterval.
|
// does _not_ deactivate FlushInterval.
|
||||||
FlushBufferWhenFull bool
|
FlushBufferWhenFull bool
|
||||||
|
|
||||||
// TODO(cam): Remove UTC and Precision parameters, they are no longer
|
// TODO(cam): Remove UTC and parameter, they are no longer
|
||||||
// valid for the agent config. Leaving them here for now for backwards-
|
// valid for the agent config. Leaving them here for now for backwards-
|
||||||
// compatability
|
// compatability
|
||||||
UTC bool `toml:"utc"`
|
UTC bool `toml:"utc"`
|
||||||
Precision string
|
|
||||||
|
|
||||||
// Debug is the option for running in debug mode
|
// Debug is the option for running in debug mode
|
||||||
Debug bool
|
Debug bool
|
||||||
@@ -209,6 +216,11 @@ var header = `# Telegraf Configuration
|
|||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||||
flush_jitter = "0s"
|
flush_jitter = "0s"
|
||||||
|
|
||||||
|
## By default, precision will be set to the same timestamp order as the
|
||||||
|
## collection interval, with the maximum being 1s.
|
||||||
|
## Precision will NOT be used for service inputs, such as logparser and statsd.
|
||||||
|
## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
|
||||||
|
precision = ""
|
||||||
## Run telegraf in debug mode
|
## Run telegraf in debug mode
|
||||||
debug = false
|
debug = false
|
||||||
## Run telegraf in quiet mode
|
## Run telegraf in quiet mode
|
||||||
|
|||||||
@@ -17,8 +17,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||||
@@ -209,27 +207,6 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CompileFilter takes a list of glob "filters", ie:
|
|
||||||
// ["MAIN.*", "CPU.*", "NET"]
|
|
||||||
// and compiles them into a glob object. This glob object can
|
|
||||||
// then be used to match keys to the filter.
|
|
||||||
func CompileFilter(filters []string) (glob.Glob, error) {
|
|
||||||
var out glob.Glob
|
|
||||||
|
|
||||||
// return if there is nothing to compile
|
|
||||||
if len(filters) == 0 {
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if len(filters) == 1 {
|
|
||||||
out, err = glob.Compile(filters[0])
|
|
||||||
} else {
|
|
||||||
out, err = glob.Compile("{" + strings.Join(filters, ",") + "}")
|
|
||||||
}
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RandomSleep will sleep for a random amount of time up to max.
|
// RandomSleep will sleep for a random amount of time up to max.
|
||||||
// If the shutdown channel is closed, it will return before it has finished
|
// If the shutdown channel is closed, it will return before it has finished
|
||||||
// sleeping.
|
// sleeping.
|
||||||
|
|||||||
@@ -107,37 +107,6 @@ func TestRunError(t *testing.T) {
|
|||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCompileFilter(t *testing.T) {
|
|
||||||
f, err := CompileFilter([]string{})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Nil(t, f)
|
|
||||||
|
|
||||||
f, err = CompileFilter([]string{"cpu"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.False(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = CompileFilter([]string{"cpu*"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.True(t, f.Match("cpu0"))
|
|
||||||
assert.False(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = CompileFilter([]string{"cpu", "mem"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.True(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = CompileFilter([]string{"cpu", "mem", "net*"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.True(t, f.Match("mem"))
|
|
||||||
assert.True(t, f.Match("network"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRandomSleep(t *testing.T) {
|
func TestRandomSleep(t *testing.T) {
|
||||||
// test that zero max returns immediately
|
// test that zero max returns immediately
|
||||||
s := time.Now()
|
s := time.Now()
|
||||||
|
|||||||
@@ -3,80 +3,78 @@ package internal_models
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/filter"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TagFilter is the name of a tag, and the values on which to filter
|
// TagFilter is the name of a tag, and the values on which to filter
|
||||||
type TagFilter struct {
|
type TagFilter struct {
|
||||||
Name string
|
Name string
|
||||||
Filter []string
|
Filter []string
|
||||||
filter glob.Glob
|
filter filter.Filter
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
// Filter containing drop/pass and tagdrop/tagpass rules
|
||||||
type Filter struct {
|
type Filter struct {
|
||||||
NameDrop []string
|
NameDrop []string
|
||||||
nameDrop glob.Glob
|
nameDrop filter.Filter
|
||||||
NamePass []string
|
NamePass []string
|
||||||
namePass glob.Glob
|
namePass filter.Filter
|
||||||
|
|
||||||
FieldDrop []string
|
FieldDrop []string
|
||||||
fieldDrop glob.Glob
|
fieldDrop filter.Filter
|
||||||
FieldPass []string
|
FieldPass []string
|
||||||
fieldPass glob.Glob
|
fieldPass filter.Filter
|
||||||
|
|
||||||
TagDrop []TagFilter
|
TagDrop []TagFilter
|
||||||
TagPass []TagFilter
|
TagPass []TagFilter
|
||||||
|
|
||||||
TagExclude []string
|
TagExclude []string
|
||||||
tagExclude glob.Glob
|
tagExclude filter.Filter
|
||||||
TagInclude []string
|
TagInclude []string
|
||||||
tagInclude glob.Glob
|
tagInclude filter.Filter
|
||||||
|
|
||||||
IsActive bool
|
IsActive bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compile all Filter lists into glob.Glob objects.
|
// Compile all Filter lists into filter.Filter objects.
|
||||||
func (f *Filter) CompileFilter() error {
|
func (f *Filter) CompileFilter() error {
|
||||||
var err error
|
var err error
|
||||||
f.nameDrop, err = internal.CompileFilter(f.NameDrop)
|
f.nameDrop, err = filter.CompileFilter(f.NameDrop)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'namedrop', %s", err)
|
return fmt.Errorf("Error compiling 'namedrop', %s", err)
|
||||||
}
|
}
|
||||||
f.namePass, err = internal.CompileFilter(f.NamePass)
|
f.namePass, err = filter.CompileFilter(f.NamePass)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'namepass', %s", err)
|
return fmt.Errorf("Error compiling 'namepass', %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.fieldDrop, err = internal.CompileFilter(f.FieldDrop)
|
f.fieldDrop, err = filter.CompileFilter(f.FieldDrop)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
|
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
|
||||||
}
|
}
|
||||||
f.fieldPass, err = internal.CompileFilter(f.FieldPass)
|
f.fieldPass, err = filter.CompileFilter(f.FieldPass)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
|
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
f.tagExclude, err = internal.CompileFilter(f.TagExclude)
|
f.tagExclude, err = filter.CompileFilter(f.TagExclude)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
|
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
|
||||||
}
|
}
|
||||||
f.tagInclude, err = internal.CompileFilter(f.TagInclude)
|
f.tagInclude, err = filter.CompileFilter(f.TagInclude)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, _ := range f.TagDrop {
|
for i, _ := range f.TagDrop {
|
||||||
f.TagDrop[i].filter, err = internal.CompileFilter(f.TagDrop[i].Filter)
|
f.TagDrop[i].filter, err = filter.CompileFilter(f.TagDrop[i].Filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i, _ := range f.TagPass {
|
for i, _ := range f.TagPass {
|
||||||
f.TagPass[i].filter, err = internal.CompileFilter(f.TagPass[i].Filter)
|
f.TagPass[i].filter, err = filter.CompileFilter(f.TagPass[i].Filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -253,51 +253,6 @@ func TestFilter_TagDrop(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_CompileFilterError(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
NameDrop: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
f = Filter{
|
|
||||||
NamePass: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
f = Filter{
|
|
||||||
FieldDrop: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
f = Filter{
|
|
||||||
FieldPass: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
f = Filter{
|
|
||||||
TagExclude: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
f = Filter{
|
|
||||||
TagInclude: []string{"", ""},
|
|
||||||
}
|
|
||||||
assert.Error(t, f.CompileFilter())
|
|
||||||
filters := []TagFilter{
|
|
||||||
TagFilter{
|
|
||||||
Name: "cpu",
|
|
||||||
Filter: []string{"{foobar}"},
|
|
||||||
}}
|
|
||||||
f = Filter{
|
|
||||||
TagDrop: filters,
|
|
||||||
}
|
|
||||||
require.Error(t, f.CompileFilter())
|
|
||||||
filters = []TagFilter{
|
|
||||||
TagFilter{
|
|
||||||
Name: "cpu",
|
|
||||||
Filter: []string{"{foobar}"},
|
|
||||||
}}
|
|
||||||
f = Filter{
|
|
||||||
TagPass: filters,
|
|
||||||
}
|
|
||||||
require.Error(t, f.CompileFilter())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_ShouldMetricsPass(t *testing.T) {
|
func TestFilter_ShouldMetricsPass(t *testing.T) {
|
||||||
m := testutil.TestMetric(1, "testmetric")
|
m := testutil.TestMetric(1, "testmetric")
|
||||||
f := Filter{
|
f := Filter{
|
||||||
|
|||||||
@@ -45,14 +45,9 @@ func NewMetric(
|
|||||||
name string,
|
name string,
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
fields map[string]interface{},
|
fields map[string]interface{},
|
||||||
t ...time.Time,
|
t time.Time,
|
||||||
) (Metric, error) {
|
) (Metric, error) {
|
||||||
var T time.Time
|
pt, err := client.NewPoint(name, tags, fields, t)
|
||||||
if len(t) > 0 {
|
|
||||||
T = t[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
pt, err := client.NewPoint(name, tags, fields, T)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -51,23 +51,6 @@ func TestNewMetricString(t *testing.T) {
|
|||||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewMetricStringNoTime(t *testing.T) {
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
}
|
|
||||||
m, err := NewMetric("cpu", tags, fields)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
|
||||||
assert.Equal(t, lineProto, m.String())
|
|
||||||
|
|
||||||
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99")
|
|
||||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewMetricFailNaN(t *testing.T) {
|
func TestNewMetricFailNaN(t *testing.T) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ import (
|
|||||||
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
|
||||||
|
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||||
|
|||||||
@@ -177,8 +177,12 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error {
|
|||||||
// There were matches, so we'll append each match together with
|
// There were matches, so we'll append each match together with
|
||||||
// the arguments to the commands slice
|
// the arguments to the commands slice
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
commands = append(
|
if len(cmdAndArgs) == 1 {
|
||||||
commands, strings.Join([]string{match, cmdAndArgs[1]}, " "))
|
commands = append(commands, match)
|
||||||
|
} else {
|
||||||
|
commands = append(commands,
|
||||||
|
strings.Join([]string{match, cmdAndArgs[1]}, " "))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ var sampleConfig = `
|
|||||||
## an array of Zookeeper connection strings
|
## an array of Zookeeper connection strings
|
||||||
zookeeper_peers = ["localhost:2181"]
|
zookeeper_peers = ["localhost:2181"]
|
||||||
## Zookeeper Chroot
|
## Zookeeper Chroot
|
||||||
zookeeper_chroot = "/"
|
zookeeper_chroot = ""
|
||||||
## the name of the consumer group
|
## the name of the consumer group
|
||||||
consumer_group = "telegraf_metrics_consumers"
|
consumer_group = "telegraf_metrics_consumers"
|
||||||
## Offset (must be either "oldest" or "newest")
|
## Offset (must be either "oldest" or "newest")
|
||||||
|
|||||||
89
plugins/inputs/logparser/README.md
Normal file
89
plugins/inputs/logparser/README.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
# logparser Input Plugin
|
||||||
|
|
||||||
|
The logparser plugin streams and parses the given logfiles. Currently it only
|
||||||
|
has the capability of parsing "grok" patterns from logfiles, which also supports
|
||||||
|
regex patterns.
|
||||||
|
|
||||||
|
### Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.logparser]]
|
||||||
|
## Log files to parse.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". ie:
|
||||||
|
## /var/log/**.log -> recursively find all .log files in /var/log
|
||||||
|
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
|
## /var/log/apache.log -> only tail the apache log file
|
||||||
|
files = ["/var/log/influxdb/influxdb.log"]
|
||||||
|
## Read file from beginning.
|
||||||
|
from_beginning = false
|
||||||
|
|
||||||
|
## Parse logstash-style "grok" patterns:
|
||||||
|
## Telegraf builtin parsing patterns: https://goo.gl/dkay10
|
||||||
|
[inputs.logparser.grok]
|
||||||
|
## This is a list of patterns to check the given log file(s) for.
|
||||||
|
## Note that adding patterns here increases processing time. The most
|
||||||
|
## efficient configuration is to have one file & pattern per logparser.
|
||||||
|
patterns = ["%{INFLUXDB_HTTPD_LOG}"]
|
||||||
|
## Full path(s) to custom pattern files.
|
||||||
|
custom_pattern_files = []
|
||||||
|
## Custom patterns can also be defined here. Put one pattern per line.
|
||||||
|
custom_patterns = '''
|
||||||
|
'''
|
||||||
|
```
|
||||||
|
|
||||||
|
## Grok Parser
|
||||||
|
|
||||||
|
The grok parser uses a slightly modified version of logstash "grok" patterns,
|
||||||
|
with the format `%{<capture_syntax>[:<semantic_name>][:<modifier>]}`
|
||||||
|
|
||||||
|
|
||||||
|
Telegraf has many of it's own
|
||||||
|
[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
|
||||||
|
as well as supporting
|
||||||
|
[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
|
||||||
|
|
||||||
|
|
||||||
|
The best way to get acquainted with grok patterns is to read the logstash docs,
|
||||||
|
which are available here:
|
||||||
|
https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
|
||||||
|
|
||||||
|
If you need help building patterns to match your logs,
|
||||||
|
you will find the http://grokdebug.herokuapp.com application quite useful!
|
||||||
|
|
||||||
|
|
||||||
|
By default all named captures are converted into string fields.
|
||||||
|
Modifiers can be used to convert captures to other types or tags.
|
||||||
|
Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
parsed metric.
|
||||||
|
|
||||||
|
|
||||||
|
- Available modifiers:
|
||||||
|
- string (default if nothing is specified)
|
||||||
|
- int
|
||||||
|
- float
|
||||||
|
- duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
- tag (converts the field into a tag)
|
||||||
|
- drop (drops the field completely)
|
||||||
|
- Timestamp modifiers:
|
||||||
|
- ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
- ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
- ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
- ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
- ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
- ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
- ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
- ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
- ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
- ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
- ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
- ts-epoch (seconds since unix epoch)
|
||||||
|
- ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
- ts-"CUSTOM"
|
||||||
|
|
||||||
|
|
||||||
|
CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
|
||||||
|
See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
373
plugins/inputs/logparser/grok/grok.go
Normal file
373
plugins/inputs/logparser/grok/grok.go
Normal file
@@ -0,0 +1,373 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/vjeantet/grok"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
var timeFormats = map[string]string{
|
||||||
|
"ts-ansic": "Mon Jan _2 15:04:05 2006",
|
||||||
|
"ts-unix": "Mon Jan _2 15:04:05 MST 2006",
|
||||||
|
"ts-ruby": "Mon Jan 02 15:04:05 -0700 2006",
|
||||||
|
"ts-rfc822": "02 Jan 06 15:04 MST",
|
||||||
|
"ts-rfc822z": "02 Jan 06 15:04 -0700", // RFC822 with numeric zone
|
||||||
|
"ts-rfc850": "Monday, 02-Jan-06 15:04:05 MST",
|
||||||
|
"ts-rfc1123": "Mon, 02 Jan 2006 15:04:05 MST",
|
||||||
|
"ts-rfc1123z": "Mon, 02 Jan 2006 15:04:05 -0700", // RFC1123 with numeric zone
|
||||||
|
"ts-rfc3339": "2006-01-02T15:04:05Z07:00",
|
||||||
|
"ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00",
|
||||||
|
"ts-httpd": "02/Jan/2006:15:04:05 -0700",
|
||||||
|
"ts-epoch": "EPOCH",
|
||||||
|
"ts-epochnano": "EPOCH_NANO",
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
INT = "int"
|
||||||
|
TAG = "tag"
|
||||||
|
FLOAT = "float"
|
||||||
|
STRING = "string"
|
||||||
|
DURATION = "duration"
|
||||||
|
DROP = "drop"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// matches named captures that contain a type.
|
||||||
|
// ie,
|
||||||
|
// %{NUMBER:bytes:int}
|
||||||
|
// %{IPORHOST:clientip:tag}
|
||||||
|
// %{HTTPDATE:ts1:ts-http}
|
||||||
|
// %{HTTPDATE:ts2:ts-"02 Jan 06 15:04"}
|
||||||
|
typedRe = regexp.MustCompile(`%{\w+:(\w+):(ts-".+"|t?s?-?\w+)}`)
|
||||||
|
// matches a plain pattern name. ie, %{NUMBER}
|
||||||
|
patternOnlyRe = regexp.MustCompile(`%{(\w+)}`)
|
||||||
|
)
|
||||||
|
|
||||||
|
type Parser struct {
|
||||||
|
Patterns []string
|
||||||
|
CustomPatterns string
|
||||||
|
CustomPatternFiles []string
|
||||||
|
|
||||||
|
// typeMap is a map of patterns -> capture name -> modifier,
|
||||||
|
// ie, {
|
||||||
|
// "%{TESTLOG}":
|
||||||
|
// {
|
||||||
|
// "bytes": "int",
|
||||||
|
// "clientip": "tag"
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
typeMap map[string]map[string]string
|
||||||
|
// tsMap is a map of patterns -> capture name -> timestamp layout.
|
||||||
|
// ie, {
|
||||||
|
// "%{TESTLOG}":
|
||||||
|
// {
|
||||||
|
// "httptime": "02/Jan/2006:15:04:05 -0700"
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
tsMap map[string]map[string]string
|
||||||
|
// patterns is a map of all of the parsed patterns from CustomPatterns
|
||||||
|
// and CustomPatternFiles.
|
||||||
|
// ie, {
|
||||||
|
// "DURATION": "%{NUMBER}[nuµm]?s"
|
||||||
|
// "RESPONSE_CODE": "%{NUMBER:rc:tag}"
|
||||||
|
// }
|
||||||
|
patterns map[string]string
|
||||||
|
|
||||||
|
g *grok.Grok
|
||||||
|
tsModder *tsModder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) Compile() error {
|
||||||
|
p.typeMap = make(map[string]map[string]string)
|
||||||
|
p.tsMap = make(map[string]map[string]string)
|
||||||
|
p.patterns = make(map[string]string)
|
||||||
|
p.tsModder = &tsModder{}
|
||||||
|
var err error
|
||||||
|
p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns
|
||||||
|
|
||||||
|
if len(p.CustomPatterns) != 0 {
|
||||||
|
scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, filename := range p.CustomPatternFiles {
|
||||||
|
file, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bufio.NewReader(file))
|
||||||
|
p.addCustomPatterns(scanner)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.compileCustomPatterns()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
|
||||||
|
var err error
|
||||||
|
var values map[string]string
|
||||||
|
// the matching pattern string
|
||||||
|
var patternName string
|
||||||
|
for _, pattern := range p.Patterns {
|
||||||
|
if values, err = p.g.Parse(pattern, line); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(values) != 0 {
|
||||||
|
patternName = pattern
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(values) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := make(map[string]string)
|
||||||
|
timestamp := time.Now()
|
||||||
|
for k, v := range values {
|
||||||
|
if k == "" || v == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var t string
|
||||||
|
// check if pattern has some modifiers
|
||||||
|
if types, ok := p.typeMap[patternName]; ok {
|
||||||
|
t = types[k]
|
||||||
|
}
|
||||||
|
// if we didn't find a modifier, check if we have a timestamp layout
|
||||||
|
if t == "" {
|
||||||
|
if ts, ok := p.tsMap[patternName]; ok {
|
||||||
|
// check if the modifier is a timestamp layout
|
||||||
|
if layout, ok := ts[k]; ok {
|
||||||
|
t = layout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if we didn't find a type OR timestamp modifier, assume string
|
||||||
|
if t == "" {
|
||||||
|
t = STRING
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case INT:
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = iv
|
||||||
|
}
|
||||||
|
case FLOAT:
|
||||||
|
fv, err := strconv.ParseFloat(v, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to float: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = fv
|
||||||
|
}
|
||||||
|
case DURATION:
|
||||||
|
d, err := time.ParseDuration(v)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to duration: %s", v, err)
|
||||||
|
} else {
|
||||||
|
fields[k] = int64(d)
|
||||||
|
}
|
||||||
|
case TAG:
|
||||||
|
tags[k] = v
|
||||||
|
case STRING:
|
||||||
|
fields[k] = strings.Trim(v, `"`)
|
||||||
|
case "EPOCH":
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
timestamp = time.Unix(iv, 0)
|
||||||
|
}
|
||||||
|
case "EPOCH_NANO":
|
||||||
|
iv, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR parsing %s to int: %s", v, err)
|
||||||
|
} else {
|
||||||
|
timestamp = time.Unix(0, iv)
|
||||||
|
}
|
||||||
|
case DROP:
|
||||||
|
// goodbye!
|
||||||
|
default:
|
||||||
|
ts, err := time.Parse(t, v)
|
||||||
|
if err == nil {
|
||||||
|
timestamp = ts
|
||||||
|
} else {
|
||||||
|
log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return telegraf.NewMetric("logparser_grok", tags, fields, p.tsModder.tsMod(timestamp))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) {
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if len(line) > 0 && line[0] != '#' {
|
||||||
|
names := strings.SplitN(line, " ", 2)
|
||||||
|
p.patterns[names[0]] = names[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Parser) compileCustomPatterns() error {
|
||||||
|
var err error
|
||||||
|
// check if the pattern contains a subpattern that is already defined
|
||||||
|
// replace it with the subpattern for modifier inheritance.
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
for _, subName := range subNames {
|
||||||
|
if subPattern, ok := p.patterns[subName[1]]; ok {
|
||||||
|
pattern = strings.Replace(pattern, subName[0], subPattern, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if pattern contains modifiers. Parse them out if it does.
|
||||||
|
for name, pattern := range p.patterns {
|
||||||
|
if typedRe.MatchString(pattern) {
|
||||||
|
// this pattern has modifiers, so parse out the modifiers
|
||||||
|
pattern, err = p.parseTypedCaptures(name, pattern)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.patterns[name] = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.g.AddPatternsFromMap(p.patterns)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTypedCaptures parses the capture types, and then deletes the type from
|
||||||
|
// the line so that it is a valid "grok" pattern again.
|
||||||
|
// ie,
|
||||||
|
// %{NUMBER:bytes:int} => %{NUMBER:bytes} (stores %{NUMBER}->bytes->int)
|
||||||
|
// %{IPORHOST:clientip:tag} => %{IPORHOST:clientip} (stores %{IPORHOST}->clientip->tag)
|
||||||
|
func (p *Parser) parseTypedCaptures(name, pattern string) (string, error) {
|
||||||
|
matches := typedRe.FindAllStringSubmatch(pattern, -1)
|
||||||
|
|
||||||
|
// grab the name of the capture pattern
|
||||||
|
patternName := "%{" + name + "}"
|
||||||
|
// create type map for this pattern
|
||||||
|
p.typeMap[patternName] = make(map[string]string)
|
||||||
|
p.tsMap[patternName] = make(map[string]string)
|
||||||
|
|
||||||
|
// boolean to verify that each pattern only has a single ts- data type.
|
||||||
|
hasTimestamp := false
|
||||||
|
for _, match := range matches {
|
||||||
|
// regex capture 1 is the name of the capture
|
||||||
|
// regex capture 2 is the type of the capture
|
||||||
|
if strings.HasPrefix(match[2], "ts-") {
|
||||||
|
if hasTimestamp {
|
||||||
|
return pattern, fmt.Errorf("logparser pattern compile error: "+
|
||||||
|
"Each pattern is allowed only one named "+
|
||||||
|
"timestamp data type. pattern: %s", pattern)
|
||||||
|
}
|
||||||
|
if f, ok := timeFormats[match[2]]; ok {
|
||||||
|
p.tsMap[patternName][match[1]] = f
|
||||||
|
} else {
|
||||||
|
p.tsMap[patternName][match[1]] = strings.TrimSuffix(strings.TrimPrefix(match[2], `ts-"`), `"`)
|
||||||
|
}
|
||||||
|
hasTimestamp = true
|
||||||
|
} else {
|
||||||
|
p.typeMap[patternName][match[1]] = match[2]
|
||||||
|
}
|
||||||
|
|
||||||
|
// the modifier is not a valid part of a "grok" pattern, so remove it
|
||||||
|
// from the pattern.
|
||||||
|
pattern = strings.Replace(pattern, ":"+match[2]+"}", "}", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pattern, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsModder is a struct for incrementing identical timestamps of log lines
|
||||||
|
// so that we don't push identical metrics that will get overwritten.
|
||||||
|
type tsModder struct {
|
||||||
|
dupe time.Time
|
||||||
|
last time.Time
|
||||||
|
incr time.Duration
|
||||||
|
incrn time.Duration
|
||||||
|
rollover time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// tsMod increments the given timestamp one unit more from the previous
|
||||||
|
// duplicate timestamp.
|
||||||
|
// the increment unit is determined as the next smallest time unit below the
|
||||||
|
// most significant time unit of ts.
|
||||||
|
// ie, if the input is at ms precision, it will increment it 1µs.
|
||||||
|
func (t *tsModder) tsMod(ts time.Time) time.Time {
|
||||||
|
defer func() { t.last = ts }()
|
||||||
|
// don't mod the time if we don't need to
|
||||||
|
if t.last.IsZero() || ts.IsZero() {
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
if !ts.Equal(t.last) && !ts.Equal(t.dupe) {
|
||||||
|
t.incr = 0
|
||||||
|
t.incrn = 0
|
||||||
|
t.rollover = 0
|
||||||
|
return ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.last) {
|
||||||
|
t.dupe = ts
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.Equal(t.dupe) && t.incr == time.Duration(0) {
|
||||||
|
tsNano := ts.UnixNano()
|
||||||
|
|
||||||
|
d := int64(10)
|
||||||
|
counter := 1
|
||||||
|
for {
|
||||||
|
a := tsNano % d
|
||||||
|
if a > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d = d * 10
|
||||||
|
counter++
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case counter <= 6:
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
case counter <= 9:
|
||||||
|
t.incr = time.Microsecond
|
||||||
|
case counter > 9:
|
||||||
|
t.incr = time.Millisecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.incrn++
|
||||||
|
if t.incrn == 999 && t.incr > time.Nanosecond {
|
||||||
|
t.rollover = t.incr * t.incrn
|
||||||
|
t.incrn = 1
|
||||||
|
t.incr = t.incr / 1000
|
||||||
|
if t.incr < time.Nanosecond {
|
||||||
|
t.incr = time.Nanosecond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ts.Add(t.incr*t.incrn + t.rollover)
|
||||||
|
}
|
||||||
508
plugins/inputs/logparser/grok/grok_test.go
Normal file
508
plugins/inputs/logparser/grok/grok_test.go
Normal file
@@ -0,0 +1,508 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var benchM telegraf.Metric
|
||||||
|
|
||||||
|
func Benchmark_ParseLine_CommonLogFormat(b *testing.B) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
p.Compile()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
|
||||||
|
}
|
||||||
|
benchM = m
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{COMBINED_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
p.Compile()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`)
|
||||||
|
}
|
||||||
|
benchM = m
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark_ParseLine_InfluxLog(b *testing.B) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
|
||||||
|
}
|
||||||
|
p.Compile()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
m, _ = p.ParseLine(`[httpd] 192.168.1.1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
|
||||||
|
}
|
||||||
|
benchM = m
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark_ParseLine_InfluxLog_NoMatch(b *testing.B) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
|
||||||
|
}
|
||||||
|
p.Compile()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
m, _ = p.ParseLine(`[retention] 2016/06/14 14:38:24 retention policy shard deletion check commencing`)
|
||||||
|
}
|
||||||
|
benchM = m
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark_ParseLine_CustomPattern(b *testing.B) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time:duration}
|
||||||
|
TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
p.Compile()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
m, _ = p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||||
|
}
|
||||||
|
benchM = m
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuiltinInfluxdbHttpd(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{INFLUXDB_HTTPD_LOG}"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
// Parse an influxdb POST request
|
||||||
|
m, err := p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`)
|
||||||
|
require.NotNil(t, m)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"resp_bytes": int64(0),
|
||||||
|
"auth": "-",
|
||||||
|
"client_ip": "::1",
|
||||||
|
"resp_code": int64(204),
|
||||||
|
"http_version": float64(1.1),
|
||||||
|
"ident": "-",
|
||||||
|
"referrer": "-",
|
||||||
|
"request": "/write?consistency=any&db=telegraf&precision=ns&rp=",
|
||||||
|
"response_time_us": int64(2513),
|
||||||
|
"agent": "InfluxDBClient",
|
||||||
|
},
|
||||||
|
m.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"verb": "POST"}, m.Tags())
|
||||||
|
|
||||||
|
// Parse an influxdb GET request
|
||||||
|
m, err = p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:12:10:02 +0100] "GET /query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h HTTP/1.1" 200 578 "http://localhost:8083/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36" 8a3806f1-3220-11e6-8006-000000000000 988`)
|
||||||
|
require.NotNil(t, m)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"resp_bytes": int64(578),
|
||||||
|
"auth": "-",
|
||||||
|
"client_ip": "::1",
|
||||||
|
"resp_code": int64(200),
|
||||||
|
"http_version": float64(1.1),
|
||||||
|
"ident": "-",
|
||||||
|
"referrer": "http://localhost:8083/",
|
||||||
|
"request": "/query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h",
|
||||||
|
"response_time_us": int64(988),
|
||||||
|
"agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36",
|
||||||
|
},
|
||||||
|
m.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
|
||||||
|
}
|
||||||
|
|
||||||
|
// common log format
|
||||||
|
// 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
|
||||||
|
func TestBuiltinCommonLogFormat(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
// Parse an influxdb POST request
|
||||||
|
m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
|
||||||
|
require.NotNil(t, m)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"resp_bytes": int64(2326),
|
||||||
|
"auth": "frank",
|
||||||
|
"client_ip": "127.0.0.1",
|
||||||
|
"resp_code": int64(200),
|
||||||
|
"http_version": float64(1.0),
|
||||||
|
"ident": "user-identifier",
|
||||||
|
"request": "/apache_pb.gif",
|
||||||
|
},
|
||||||
|
m.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
|
||||||
|
}
|
||||||
|
|
||||||
|
// combined log format
|
||||||
|
// 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"
|
||||||
|
func TestBuiltinCombinedLogFormat(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{COMBINED_LOG_FORMAT}"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
// Parse an influxdb POST request
|
||||||
|
m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`)
|
||||||
|
require.NotNil(t, m)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"resp_bytes": int64(2326),
|
||||||
|
"auth": "frank",
|
||||||
|
"client_ip": "127.0.0.1",
|
||||||
|
"resp_code": int64(200),
|
||||||
|
"http_version": float64(1.0),
|
||||||
|
"ident": "user-identifier",
|
||||||
|
"request": "/apache_pb.gif",
|
||||||
|
"referrer": "-",
|
||||||
|
"agent": "Mozilla",
|
||||||
|
},
|
||||||
|
m.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"verb": "GET"}, m.Tags())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompileStringAndParse(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time:duration}
|
||||||
|
TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
|
||||||
|
require.NotNil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"clientip": "192.168.1.1",
|
||||||
|
"myfloat": float64(1.25),
|
||||||
|
"response_time": int64(5432),
|
||||||
|
},
|
||||||
|
metricA.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseEpochNano(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{MYAPP}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
MYAPP %{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`)
|
||||||
|
require.NotNil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"response_time": int64(20821),
|
||||||
|
"metric": float64(10890.645),
|
||||||
|
},
|
||||||
|
metricA.Fields())
|
||||||
|
assert.Equal(t, map[string]string{}, metricA.Tags())
|
||||||
|
assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseEpoch(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{MYAPP}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
MYAPP %{POSINT:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`1466004605 response_time=20821 mymetric=10890.645`)
|
||||||
|
require.NotNil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"response_time": int64(20821),
|
||||||
|
"metric": float64(10890.645),
|
||||||
|
},
|
||||||
|
metricA.Fields())
|
||||||
|
assert.Equal(t, map[string]string{}, metricA.Tags())
|
||||||
|
assert.Equal(t, time.Unix(1466004605, 0), metricA.Time())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseEpochErrors(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{MYAPP}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
MYAPP %{WORD:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
_, err := p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{MYAPP}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
MYAPP %{WORD:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
_, err = p.ParseLine(`foobar response_time=20821 mymetric=10890.645`)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompileFileAndParse(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`)
|
||||||
|
require.NotNil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"clientip": "192.168.1.1",
|
||||||
|
"myfloat": float64(1.25),
|
||||||
|
"response_time": int64(5432),
|
||||||
|
"myint": int64(101),
|
||||||
|
},
|
||||||
|
metricA.Fields())
|
||||||
|
assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags())
|
||||||
|
assert.Equal(t,
|
||||||
|
time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
|
||||||
|
metricA.Time().Nanosecond())
|
||||||
|
|
||||||
|
metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`)
|
||||||
|
require.NotNil(t, metricB)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"myfloat": 1.25,
|
||||||
|
"mystring": "mystring",
|
||||||
|
"nomodifier": "nomodifier",
|
||||||
|
},
|
||||||
|
metricB.Fields())
|
||||||
|
assert.Equal(t, map[string]string{}, metricB.Tags())
|
||||||
|
assert.Equal(t,
|
||||||
|
time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(),
|
||||||
|
metricB.Time().Nanosecond())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompileNoModifiersAndParse(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_C}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
TEST_LOG_C %{NUMBER:myfloat} %{NUMBER} %{IPORHOST:clientip} %{DURATION:rt}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
|
||||||
|
require.NotNil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t,
|
||||||
|
map[string]interface{}{
|
||||||
|
"clientip": "192.168.1.1",
|
||||||
|
"myfloat": "1.25",
|
||||||
|
"rt": "5.432µs",
|
||||||
|
},
|
||||||
|
metricA.Fields())
|
||||||
|
assert.Equal(t, map[string]string{}, metricA.Tags())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompileNoNamesAndParse(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_C}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
TEST_LOG_C %{NUMBER} %{NUMBER} %{IPORHOST} %{DURATION}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`)
|
||||||
|
require.Nil(t, metricA)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseNoMatch(t *testing.T) {
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatternFiles: []string{"./testdata/test-patterns"},
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
|
||||||
|
metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Nil(t, metricA)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompileErrors(t *testing.T) {
|
||||||
|
// Compile fails because there are multiple timestamps:
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts1:ts-httpd} %{HTTPDATE:ts2:ts-httpd} %{NUMBER:mynum:int}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.Error(t, p.Compile())
|
||||||
|
|
||||||
|
// Compile fails because file doesn't exist:
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
|
||||||
|
CustomPatternFiles: []string{"/tmp/foo/bar/baz"},
|
||||||
|
}
|
||||||
|
assert.Error(t, p.Compile())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseErrors(t *testing.T) {
|
||||||
|
// Parse fails because the pattern doesn't exist
|
||||||
|
p := &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_B}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} %{}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
_, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Parse fails because myword is not an int
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Parse fails because myword is not a float
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:float}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Parse fails because myword is not a duration
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:duration}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
|
||||||
|
assert.Error(t, err)
|
||||||
|
|
||||||
|
// Parse fails because the time layout is wrong.
|
||||||
|
p = &Parser{
|
||||||
|
Patterns: []string{"%{TEST_LOG_A}"},
|
||||||
|
CustomPatterns: `
|
||||||
|
TEST_LOG_A %{HTTPDATE:ts:ts-unix} %{WORD:myword:duration}
|
||||||
|
`,
|
||||||
|
}
|
||||||
|
assert.NoError(t, p.Compile())
|
||||||
|
_, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`)
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTsModder(t *testing.T) {
|
||||||
|
tsm := &tsModder{}
|
||||||
|
|
||||||
|
reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC)
|
||||||
|
modt := tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime, modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Microsecond*1), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Microsecond*2), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Microsecond*3), modt)
|
||||||
|
|
||||||
|
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime, modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||||
|
|
||||||
|
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond)*999, time.UTC)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime, modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*1), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*2), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*3), modt)
|
||||||
|
|
||||||
|
reftime = time.Date(2006, time.December, 1, 1, 1, 1, 0, time.UTC)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime, modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Millisecond*1), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Millisecond*2), modt)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime.Add(time.Millisecond*3), modt)
|
||||||
|
|
||||||
|
reftime = time.Time{}
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
assert.Equal(t, reftime, modt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTsModder_Rollover(t *testing.T) {
|
||||||
|
tsm := &tsModder{}
|
||||||
|
|
||||||
|
reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC)
|
||||||
|
modt := tsm.tsMod(reftime)
|
||||||
|
for i := 1; i < 1000; i++ {
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
}
|
||||||
|
assert.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt)
|
||||||
|
|
||||||
|
reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC)
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
for i := 1; i < 1001; i++ {
|
||||||
|
modt = tsm.tsMod(reftime)
|
||||||
|
}
|
||||||
|
assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt)
|
||||||
|
}
|
||||||
80
plugins/inputs/logparser/grok/influx_patterns.go
Normal file
80
plugins/inputs/logparser/grok/influx_patterns.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
package grok
|
||||||
|
|
||||||
|
// THIS SHOULD BE KEPT IN-SYNC WITH patterns/influx-patterns
|
||||||
|
const DEFAULT_PATTERNS = `
|
||||||
|
# Captures are a slightly modified version of logstash "grok" patterns, with
|
||||||
|
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
|
||||||
|
# By default all named captures are converted into string fields.
|
||||||
|
# Modifiers can be used to convert captures to other types or tags.
|
||||||
|
# Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
# parsed metric.
|
||||||
|
|
||||||
|
# View logstash grok pattern docs here:
|
||||||
|
# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
# All default logstash patterns are supported, these can be viewed here:
|
||||||
|
# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns
|
||||||
|
|
||||||
|
# Available modifiers:
|
||||||
|
# string (default if nothing is specified)
|
||||||
|
# int
|
||||||
|
# float
|
||||||
|
# duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
# tag (converts the field into a tag)
|
||||||
|
# drop (drops the field completely)
|
||||||
|
# Timestamp modifiers:
|
||||||
|
# ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
# ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
# ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
# ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
# ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
# ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
# ts-epoch (seconds since unix epoch)
|
||||||
|
# ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
# ts-"CUSTOM"
|
||||||
|
# CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
|
||||||
|
# See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
|
# Example log file pattern, example log looks like this:
|
||||||
|
# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
|
||||||
|
# Breakdown of the DURATION pattern below:
|
||||||
|
# NUMBER is a builtin logstash grok pattern matching float & int numbers.
|
||||||
|
# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
|
||||||
|
# s is also regex, this pattern must end in "s".
|
||||||
|
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time_ns:duration}
|
||||||
|
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
|
||||||
|
# Wider-ranging username matching vs. logstash built-in %{USER}
|
||||||
|
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
|
||||||
|
NGUSER %{NGUSERNAME}
|
||||||
|
|
||||||
|
##
|
||||||
|
## COMMON LOG PATTERNS
|
||||||
|
##
|
||||||
|
|
||||||
|
# InfluxDB log patterns
|
||||||
|
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
|
||||||
|
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}
|
||||||
|
|
||||||
|
# apache & nginx logs, this is also known as the "common log format"
|
||||||
|
# see https://en.wikipedia.org/wiki/Common_Log_Format
|
||||||
|
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:int} (?:%{NUMBER:resp_bytes:int}|-)
|
||||||
|
|
||||||
|
# Combined log format is the same as the common log format but with the addition
|
||||||
|
# of two quoted strings at the end for "referrer" and "agent"
|
||||||
|
# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
|
||||||
|
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}
|
||||||
|
|
||||||
|
# HTTPD log formats
|
||||||
|
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
|
||||||
|
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
|
||||||
|
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
|
||||||
|
`
|
||||||
75
plugins/inputs/logparser/grok/patterns/influx-patterns
Normal file
75
plugins/inputs/logparser/grok/patterns/influx-patterns
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# Captures are a slightly modified version of logstash "grok" patterns, with
|
||||||
|
# the format %{<capture syntax>[:<semantic name>][:<modifier>]}
|
||||||
|
# By default all named captures are converted into string fields.
|
||||||
|
# Modifiers can be used to convert captures to other types or tags.
|
||||||
|
# Timestamp modifiers can be used to convert captures to the timestamp of the
|
||||||
|
# parsed metric.
|
||||||
|
|
||||||
|
# View logstash grok pattern docs here:
|
||||||
|
# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
|
||||||
|
# All default logstash patterns are supported, these can be viewed here:
|
||||||
|
# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns
|
||||||
|
|
||||||
|
# Available modifiers:
|
||||||
|
# string (default if nothing is specified)
|
||||||
|
# int
|
||||||
|
# float
|
||||||
|
# duration (ie, 5.23ms gets converted to int nanoseconds)
|
||||||
|
# tag (converts the field into a tag)
|
||||||
|
# drop (drops the field completely)
|
||||||
|
# Timestamp modifiers:
|
||||||
|
# ts-ansic ("Mon Jan _2 15:04:05 2006")
|
||||||
|
# ts-unix ("Mon Jan _2 15:04:05 MST 2006")
|
||||||
|
# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
|
||||||
|
# ts-rfc822 ("02 Jan 06 15:04 MST")
|
||||||
|
# ts-rfc822z ("02 Jan 06 15:04 -0700")
|
||||||
|
# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
|
||||||
|
# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
|
||||||
|
# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
|
||||||
|
# ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
|
||||||
|
# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
|
||||||
|
# ts-httpd ("02/Jan/2006:15:04:05 -0700")
|
||||||
|
# ts-epoch (seconds since unix epoch)
|
||||||
|
# ts-epochnano (nanoseconds since unix epoch)
|
||||||
|
# ts-"CUSTOM"
|
||||||
|
# CUSTOM time layouts must be within quotes and be the representation of the
|
||||||
|
# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006
|
||||||
|
# See https://golang.org/pkg/time/#Parse for more details.
|
||||||
|
|
||||||
|
# Example log file pattern, example log looks like this:
|
||||||
|
# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs
|
||||||
|
# Breakdown of the DURATION pattern below:
|
||||||
|
# NUMBER is a builtin logstash grok pattern matching float & int numbers.
|
||||||
|
# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets.
|
||||||
|
# s is also regex, this pattern must end in "s".
|
||||||
|
# so DURATION will match something like '5.324ms' or '6.1µs' or '10s'
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time_ns:duration}
|
||||||
|
EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
|
||||||
|
|
||||||
|
# Wider-ranging username matching vs. logstash built-in %{USER}
|
||||||
|
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
|
||||||
|
NGUSER %{NGUSERNAME}
|
||||||
|
|
||||||
|
##
|
||||||
|
## COMMON LOG PATTERNS
|
||||||
|
##
|
||||||
|
|
||||||
|
# InfluxDB log patterns
|
||||||
|
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
|
||||||
|
INFLUXDB_HTTPD_LOG \[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}
|
||||||
|
|
||||||
|
# apache & nginx logs, this is also known as the "common log format"
|
||||||
|
# see https://en.wikipedia.org/wiki/Common_Log_Format
|
||||||
|
COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:int} (?:%{NUMBER:resp_bytes:int}|-)
|
||||||
|
|
||||||
|
# Combined log format is the same as the common log format but with the addition
|
||||||
|
# of two quoted strings at the end for "referrer" and "agent"
|
||||||
|
# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html
|
||||||
|
COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent}
|
||||||
|
|
||||||
|
# HTTPD log formats
|
||||||
|
HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg}
|
||||||
|
HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? %{DATA:errorcode}: %{GREEDYDATA:message}
|
||||||
|
HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}
|
||||||
14
plugins/inputs/logparser/grok/testdata/test-patterns
vendored
Normal file
14
plugins/inputs/logparser/grok/testdata/test-patterns
vendored
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Test A log line:
|
||||||
|
# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101
|
||||||
|
DURATION %{NUMBER}[nuµm]?s
|
||||||
|
RESPONSE_CODE %{NUMBER:response_code:tag}
|
||||||
|
RESPONSE_TIME %{DURATION:response_time:duration}
|
||||||
|
TEST_LOG_A \[%{HTTPDATE:timestamp:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int}
|
||||||
|
|
||||||
|
# Test B log line:
|
||||||
|
# [04/06/2016--12:41:45] 1.25 mystring dropme nomodifier
|
||||||
|
TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME}
|
||||||
|
TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:string} %{WORD:dropme:drop} %{WORD:nomodifier}
|
||||||
|
|
||||||
|
TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME}
|
||||||
|
TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} %{WORD:dropme:drop} %{WORD:nomodifier}
|
||||||
1
plugins/inputs/logparser/grok/testdata/test_a.log
vendored
Normal file
1
plugins/inputs/logparser/grok/testdata/test_a.log
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101
|
||||||
1
plugins/inputs/logparser/grok/testdata/test_b.log
vendored
Normal file
1
plugins/inputs/logparser/grok/testdata/test_b.log
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier
|
||||||
228
plugins/inputs/logparser/logparser.go
Normal file
228
plugins/inputs/logparser/logparser.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
package logparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/globpath"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
|
||||||
|
// Parsers
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
|
||||||
|
)
|
||||||
|
|
||||||
|
type LogParser interface {
|
||||||
|
ParseLine(line string) (telegraf.Metric, error)
|
||||||
|
Compile() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type LogParserPlugin struct {
|
||||||
|
Files []string
|
||||||
|
FromBeginning bool
|
||||||
|
|
||||||
|
tailers []*tail.Tail
|
||||||
|
lines chan string
|
||||||
|
done chan struct{}
|
||||||
|
wg sync.WaitGroup
|
||||||
|
acc telegraf.Accumulator
|
||||||
|
parsers []LogParser
|
||||||
|
|
||||||
|
sync.Mutex
|
||||||
|
|
||||||
|
GrokParser *grok.Parser `toml:"grok"`
|
||||||
|
}
|
||||||
|
|
||||||
|
const sampleConfig = `
|
||||||
|
## Log files to parse.
|
||||||
|
## These accept standard unix glob matching rules, but with the addition of
|
||||||
|
## ** as a "super asterisk". ie:
|
||||||
|
## /var/log/**.log -> recursively find all .log files in /var/log
|
||||||
|
## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
|
||||||
|
## /var/log/apache.log -> only tail the apache log file
|
||||||
|
files = ["/var/log/influxdb/influxdb.log"]
|
||||||
|
## Read file from beginning.
|
||||||
|
from_beginning = false
|
||||||
|
|
||||||
|
## Parse logstash-style "grok" patterns:
|
||||||
|
## Telegraf built-in parsing patterns: https://goo.gl/dkay10
|
||||||
|
[inputs.logparser.grok]
|
||||||
|
## This is a list of patterns to check the given log file(s) for.
|
||||||
|
## Note that adding patterns here increases processing time. The most
|
||||||
|
## efficient configuration is to have one pattern per logparser.
|
||||||
|
## Other common built-in patterns are:
|
||||||
|
## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
|
||||||
|
## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
|
||||||
|
patterns = ["%{INFLUXDB_HTTPD_LOG}"]
|
||||||
|
## Full path(s) to custom pattern files.
|
||||||
|
custom_pattern_files = []
|
||||||
|
## Custom patterns can also be defined here. Put one pattern per line.
|
||||||
|
custom_patterns = '''
|
||||||
|
'''
|
||||||
|
`
|
||||||
|
|
||||||
|
func (l *LogParserPlugin) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LogParserPlugin) Description() string {
|
||||||
|
return "Stream and parse log file(s)."
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
|
||||||
|
l.Lock()
|
||||||
|
defer l.Unlock()
|
||||||
|
|
||||||
|
l.acc = acc
|
||||||
|
l.lines = make(chan string, 1000)
|
||||||
|
l.done = make(chan struct{})
|
||||||
|
|
||||||
|
// Looks for fields which implement LogParser interface
|
||||||
|
l.parsers = []LogParser{}
|
||||||
|
s := reflect.ValueOf(l).Elem()
|
||||||
|
for i := 0; i < s.NumField(); i++ {
|
||||||
|
f := s.Field(i)
|
||||||
|
|
||||||
|
if !f.CanInterface() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if lpPlugin, ok := f.Interface().(LogParser); ok {
|
||||||
|
if reflect.ValueOf(lpPlugin).IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
l.parsers = append(l.parsers, lpPlugin)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(l.parsers) == 0 {
|
||||||
|
return fmt.Errorf("ERROR: logparser input plugin: no parser defined.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// compile log parser patterns:
|
||||||
|
for _, parser := range l.parsers {
|
||||||
|
if err := parser.Compile(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var seek tail.SeekInfo
|
||||||
|
if !l.FromBeginning {
|
||||||
|
seek.Whence = 2
|
||||||
|
seek.Offset = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
l.wg.Add(1)
|
||||||
|
go l.parser()
|
||||||
|
|
||||||
|
var errS string
|
||||||
|
// Create a "tailer" for each file
|
||||||
|
for _, filepath := range l.Files {
|
||||||
|
g, err := globpath.Compile(filepath)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
|
||||||
|
}
|
||||||
|
for file, _ := range g.Match() {
|
||||||
|
tailer, err := tail.TailFile(file,
|
||||||
|
tail.Config{
|
||||||
|
ReOpen: true,
|
||||||
|
Follow: true,
|
||||||
|
Location: &seek,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
errS += err.Error() + " "
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// create a goroutine for each "tailer"
|
||||||
|
l.wg.Add(1)
|
||||||
|
go l.receiver(tailer)
|
||||||
|
l.tailers = append(l.tailers, tailer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if errS != "" {
|
||||||
|
return fmt.Errorf(errS)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// receiver is launched as a goroutine to continuously watch a tailed logfile
|
||||||
|
// for changes and send any log lines down the l.lines channel.
|
||||||
|
func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
|
||||||
|
defer l.wg.Done()
|
||||||
|
|
||||||
|
var line *tail.Line
|
||||||
|
for line = range tailer.Lines {
|
||||||
|
if line.Err != nil {
|
||||||
|
log.Printf("ERROR tailing file %s, Error: %s\n",
|
||||||
|
tailer.Filename, line.Err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-l.done:
|
||||||
|
case l.lines <- line.Text:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parser is launched as a goroutine to watch the l.lines channel.
|
||||||
|
// when a line is available, parser parses it and adds the metric(s) to the
|
||||||
|
// accumulator.
|
||||||
|
func (l *LogParserPlugin) parser() {
|
||||||
|
defer l.wg.Done()
|
||||||
|
|
||||||
|
var m telegraf.Metric
|
||||||
|
var err error
|
||||||
|
var line string
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-l.done:
|
||||||
|
return
|
||||||
|
case line = <-l.lines:
|
||||||
|
if line == "" || line == "\n" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, parser := range l.parsers {
|
||||||
|
m, err = parser.ParseLine(line)
|
||||||
|
if err == nil {
|
||||||
|
if m != nil {
|
||||||
|
l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("Malformed log line in [%s], Error: %s\n", line, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LogParserPlugin) Stop() {
|
||||||
|
l.Lock()
|
||||||
|
defer l.Unlock()
|
||||||
|
|
||||||
|
for _, t := range l.tailers {
|
||||||
|
err := t.Stop()
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR stopping tail on file %s\n", t.Filename)
|
||||||
|
}
|
||||||
|
t.Cleanup()
|
||||||
|
}
|
||||||
|
close(l.done)
|
||||||
|
l.wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers the plugin with telegraf's input registry under the
// name "logparser".
func init() {
	inputs.Add("logparser", func() telegraf.Input {
		return &LogParserPlugin{}
	})
}
|
||||||
116
plugins/inputs/logparser/logparser_test.go
Normal file
116
plugins/inputs/logparser/logparser_test.go
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
package logparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs/logparser/grok"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStartNoParsers verifies that Start returns an error when no
// parser field (e.g. GrokParser) has been configured on the plugin.
func TestStartNoParsers(t *testing.T) {
	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{"grok/testdata/*.log"},
	}

	acc := testutil.Accumulator{}
	assert.Error(t, logparser.Start(&acc))
}
|
||||||
|
|
||||||
|
// TestGrokParseLogFilesNonExistPattern starts the plugin with a grok
// pattern name that does not exist in the custom pattern file and
// checks that Start still succeeds (the unknown pattern simply matches
// nothing rather than failing startup).
func TestGrokParseLogFilesNonExistPattern(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{FOOBAR}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/*.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, logparser.Start(&acc))

	// Give the tailing goroutines a moment to run before stopping.
	time.Sleep(time.Millisecond * 500)
	logparser.Stop()
}
|
||||||
|
|
||||||
|
// TestGrokParseLogFiles runs the plugin end-to-end against the testdata
// log files with two custom grok patterns and asserts that both the
// TEST_LOG_A and TEST_LOG_B lines are parsed into the expected
// "logparser_grok" measurements.
func TestGrokParseLogFiles(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/*.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, logparser.Start(&acc))

	// Give the tailing goroutines a moment to consume the files.
	time.Sleep(time.Millisecond * 500)
	logparser.Stop()

	// Fields produced by the TEST_LOG_A pattern.
	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
			"myint":         int64(101),
		},
		map[string]string{"response_code": "200"})

	// Fields produced by the TEST_LOG_B pattern (no tags expected).
	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"myfloat":    1.25,
			"mystring":   "mystring",
			"nomodifier": "nomodifier",
		},
		map[string]string{})
}
|
||||||
|
|
||||||
|
// TestGrokParseLogFilesOneBad configures one valid pattern and one bad
// one (TEST_LOG_BAD) and verifies that the valid pattern still parses
// its lines into the expected "logparser_grok" measurement.
func TestGrokParseLogFilesOneBad(t *testing.T) {
	thisdir := getCurrentDir()
	p := &grok.Parser{
		Patterns:           []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"},
		CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"},
	}
	assert.NoError(t, p.Compile())

	logparser := &LogParserPlugin{
		FromBeginning: true,
		Files:         []string{thisdir + "grok/testdata/*.log"},
		GrokParser:    p,
	}

	acc := testutil.Accumulator{}
	assert.NoError(t, logparser.Start(&acc))

	// Give the tailing goroutines a moment to consume the files.
	time.Sleep(time.Millisecond * 500)
	logparser.Stop()

	// The good pattern (TEST_LOG_A) should still have produced metrics.
	acc.AssertContainsTaggedFields(t, "logparser_grok",
		map[string]interface{}{
			"clientip":      "192.168.1.1",
			"myfloat":       float64(1.25),
			"response_time": int64(5432),
			"myint":         int64(101),
		},
		map[string]string{"response_code": "200"})
}
|
||||||
|
|
||||||
|
// getCurrentDir returns the directory of the caller's source file by
// stripping the test-file name from the path reported by runtime.Caller.
// Used so testdata paths resolve regardless of the working directory.
func getCurrentDir() string {
	_, filename, _, _ := runtime.Caller(1)
	return strings.Replace(filename, "logparser_test.go", "", 1)
}
|
||||||
@@ -53,13 +53,13 @@ This plugin gathers the statistic data from MySQL server
|
|||||||
## gather metrics from SHOW BINARY LOGS command output
|
## gather metrics from SHOW BINARY LOGS command output
|
||||||
gather_binary_logs = false
|
gather_binary_logs = false
|
||||||
#
|
#
|
||||||
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
|
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
|
||||||
gather_table_io_waits = false
|
gather_table_io_waits = false
|
||||||
#
|
#
|
||||||
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
|
## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
|
||||||
gather_table_lock_waits = false
|
gather_table_lock_waits = false
|
||||||
#
|
#
|
||||||
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
|
## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
|
||||||
gather_index_io_waits = false
|
gather_index_io_waits = false
|
||||||
#
|
#
|
||||||
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
|
## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
|
||||||
|
|||||||
@@ -97,11 +97,12 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
data := strings.SplitN(strings.TrimSpace(line), " ", 3)
|
data := strings.Fields(line)
|
||||||
accepts, err := strconv.ParseUint(data[0], 10, 64)
|
accepts, err := strconv.ParseUint(data[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
handled, err := strconv.ParseUint(data[1], 10, 64)
|
handled, err := strconv.ParseUint(data[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -116,7 +117,7 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
data = strings.SplitN(strings.TrimSpace(line), " ", 6)
|
data = strings.Fields(line)
|
||||||
reading, err := strconv.ParseUint(data[1], 10, 64)
|
reading, err := strconv.ParseUint(data[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -13,12 +13,18 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
const sampleResponse = `
|
const nginxSampleResponse = `
|
||||||
Active connections: 585
|
Active connections: 585
|
||||||
server accepts handled requests
|
server accepts handled requests
|
||||||
85340 85340 35085
|
85340 85340 35085
|
||||||
Reading: 4 Writing: 135 Waiting: 446
|
Reading: 4 Writing: 135 Waiting: 446
|
||||||
`
|
`
|
||||||
|
const tengineSampleResponse = `
|
||||||
|
Active connections: 403
|
||||||
|
server accepts handled requests request_time
|
||||||
|
853 8533 3502 1546565864
|
||||||
|
Reading: 8 Writing: 125 Waiting: 946
|
||||||
|
`
|
||||||
|
|
||||||
// Verify that nginx tags are properly parsed based on the server
|
// Verify that nginx tags are properly parsed based on the server
|
||||||
func TestNginxTags(t *testing.T) {
|
func TestNginxTags(t *testing.T) {
|
||||||
@@ -36,7 +42,9 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||||||
var rsp string
|
var rsp string
|
||||||
|
|
||||||
if r.URL.Path == "/stub_status" {
|
if r.URL.Path == "/stub_status" {
|
||||||
rsp = sampleResponse
|
rsp = nginxSampleResponse
|
||||||
|
} else if r.URL.Path == "/tengine_status" {
|
||||||
|
rsp = tengineSampleResponse
|
||||||
} else {
|
} else {
|
||||||
panic("Cannot handle request")
|
panic("Cannot handle request")
|
||||||
}
|
}
|
||||||
@@ -49,12 +57,20 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||||||
Urls: []string{fmt.Sprintf("%s/stub_status", ts.URL)},
|
Urls: []string{fmt.Sprintf("%s/stub_status", ts.URL)},
|
||||||
}
|
}
|
||||||
|
|
||||||
var acc testutil.Accumulator
|
nt := &Nginx{
|
||||||
|
Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)},
|
||||||
|
}
|
||||||
|
|
||||||
err := n.Gather(&acc)
|
var acc_nginx testutil.Accumulator
|
||||||
require.NoError(t, err)
|
var acc_tengine testutil.Accumulator
|
||||||
|
|
||||||
fields := map[string]interface{}{
|
err_nginx := n.Gather(&acc_nginx)
|
||||||
|
err_tengine := nt.Gather(&acc_tengine)
|
||||||
|
|
||||||
|
require.NoError(t, err_nginx)
|
||||||
|
require.NoError(t, err_tengine)
|
||||||
|
|
||||||
|
fields_nginx := map[string]interface{}{
|
||||||
"active": uint64(585),
|
"active": uint64(585),
|
||||||
"accepts": uint64(85340),
|
"accepts": uint64(85340),
|
||||||
"handled": uint64(85340),
|
"handled": uint64(85340),
|
||||||
@@ -63,6 +79,17 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||||||
"writing": uint64(135),
|
"writing": uint64(135),
|
||||||
"waiting": uint64(446),
|
"waiting": uint64(446),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fields_tengine := map[string]interface{}{
|
||||||
|
"active": uint64(403),
|
||||||
|
"accepts": uint64(853),
|
||||||
|
"handled": uint64(8533),
|
||||||
|
"requests": uint64(3502),
|
||||||
|
"reading": uint64(8),
|
||||||
|
"writing": uint64(125),
|
||||||
|
"waiting": uint64(946),
|
||||||
|
}
|
||||||
|
|
||||||
addr, err := url.Parse(ts.URL)
|
addr, err := url.Parse(ts.URL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@@ -81,5 +108,6 @@ func TestNginxGeneratesMetrics(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tags := map[string]string{"server": host, "port": port}
|
tags := map[string]string{"server": host, "port": port}
|
||||||
acc.AssertContainsTaggedFields(t, "nginx", fields, tags)
|
acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags)
|
||||||
|
acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ type Ping struct {
|
|||||||
// Number of pings to send (ping -c <COUNT>)
|
// Number of pings to send (ping -c <COUNT>)
|
||||||
Count int
|
Count int
|
||||||
|
|
||||||
// Ping timeout, in seconds. 0 means no timeout (ping -t <TIMEOUT>)
|
// Ping timeout, in seconds. 0 means no timeout (ping -W <TIMEOUT>)
|
||||||
Timeout float64
|
Timeout float64
|
||||||
|
|
||||||
// Interface to send ping from (ping -I <INTERFACE>)
|
// Interface to send ping from (ping -I <INTERFACE>)
|
||||||
@@ -55,7 +55,7 @@ const sampleConfig = `
|
|||||||
count = 1 # required
|
count = 1 # required
|
||||||
## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
||||||
ping_interval = 0.0
|
ping_interval = 0.0
|
||||||
## ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
||||||
timeout = 1.0
|
timeout = 1.0
|
||||||
## interface to send ping from (ping -I <INTERFACE>)
|
## interface to send ping from (ping -I <INTERFACE>)
|
||||||
interface = ""
|
interface = ""
|
||||||
@@ -76,7 +76,8 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
|
|||||||
go func(u string) {
|
go func(u string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
args := p.args(u)
|
args := p.args(u)
|
||||||
out, err := p.pingHost(p.Timeout, args...)
|
totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval
|
||||||
|
out, err := p.pingHost(totalTimeout, args...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Combine go err + stderr output
|
// Combine go err + stderr output
|
||||||
errorChannel <- errors.New(
|
errorChannel <- errors.New(
|
||||||
@@ -138,8 +139,8 @@ func (p *Ping) args(url string) []string {
|
|||||||
}
|
}
|
||||||
if p.Timeout > 0 {
|
if p.Timeout > 0 {
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "darwin", "freebsd":
|
case "darwin":
|
||||||
args = append(args, "-t", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
args = append(args, "-W", strconv.FormatFloat(p.Timeout/1000, 'f', 1, 64))
|
||||||
case "linux":
|
case "linux":
|
||||||
args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', 1, 64))
|
||||||
default:
|
default:
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"mime"
|
"mime"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
|
||||||
@@ -74,13 +75,13 @@ func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
|||||||
if mf.GetType() == dto.MetricType_SUMMARY {
|
if mf.GetType() == dto.MetricType_SUMMARY {
|
||||||
// summary metric
|
// summary metric
|
||||||
fields = makeQuantiles(m)
|
fields = makeQuantiles(m)
|
||||||
fields["count"] = float64(m.GetHistogram().GetSampleCount())
|
fields["count"] = float64(m.GetSummary().GetSampleCount())
|
||||||
fields["sum"] = float64(m.GetSummary().GetSampleSum())
|
fields["sum"] = float64(m.GetSummary().GetSampleSum())
|
||||||
} else if mf.GetType() == dto.MetricType_HISTOGRAM {
|
} else if mf.GetType() == dto.MetricType_HISTOGRAM {
|
||||||
// historgram metric
|
// historgram metric
|
||||||
fields = makeBuckets(m)
|
fields = makeBuckets(m)
|
||||||
fields["count"] = float64(m.GetHistogram().GetSampleCount())
|
fields["count"] = float64(m.GetHistogram().GetSampleCount())
|
||||||
fields["sum"] = float64(m.GetSummary().GetSampleSum())
|
fields["sum"] = float64(m.GetHistogram().GetSampleSum())
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// standard metric
|
// standard metric
|
||||||
@@ -88,7 +89,13 @@ func (p *PrometheusParser) Parse(buf []byte) ([]telegraf.Metric, error) {
|
|||||||
}
|
}
|
||||||
// converting to telegraf metric
|
// converting to telegraf metric
|
||||||
if len(fields) > 0 {
|
if len(fields) > 0 {
|
||||||
metric, err := telegraf.NewMetric(metricName, tags, fields)
|
var t time.Time
|
||||||
|
if m.TimestampMs != nil && *m.TimestampMs > 0 {
|
||||||
|
t = time.Unix(0, *m.TimestampMs*1000000)
|
||||||
|
} else {
|
||||||
|
t = time.Now()
|
||||||
|
}
|
||||||
|
metric, err := telegraf.NewMetric(metricName, tags, fields, t)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
metrics = append(metrics, metric)
|
metrics = append(metrics, metric)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -138,7 +138,7 @@ func TestParseValidPrometheus(t *testing.T) {
|
|||||||
"0.5": 552048.506,
|
"0.5": 552048.506,
|
||||||
"0.9": 5.876804288e+06,
|
"0.9": 5.876804288e+06,
|
||||||
"0.99": 5.876804288e+06,
|
"0.99": 5.876804288e+06,
|
||||||
"count": 0.0,
|
"count": 9.0,
|
||||||
"sum": 1.8909097205e+07,
|
"sum": 1.8909097205e+07,
|
||||||
}, metrics[0].Fields())
|
}, metrics[0].Fields())
|
||||||
assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())
|
assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags())
|
||||||
@@ -151,7 +151,7 @@ func TestParseValidPrometheus(t *testing.T) {
|
|||||||
assert.Equal(t, map[string]interface{}{
|
assert.Equal(t, map[string]interface{}{
|
||||||
"500000": 2000.0,
|
"500000": 2000.0,
|
||||||
"count": 2025.0,
|
"count": 2025.0,
|
||||||
"sum": 0.0,
|
"sum": 1.02726334e+08,
|
||||||
"250000": 1997.0,
|
"250000": 1997.0,
|
||||||
"2e+06": 2012.0,
|
"2e+06": 2012.0,
|
||||||
"4e+06": 2017.0,
|
"4e+06": 2017.0,
|
||||||
|
|||||||
@@ -27,7 +27,8 @@ const (
|
|||||||
defaultSeparator = "_"
|
defaultSeparator = "_"
|
||||||
)
|
)
|
||||||
|
|
||||||
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
|
var dropwarn = "ERROR: statsd message queue full. " +
|
||||||
|
"We have dropped %d messages so far. " +
|
||||||
"You may want to increase allowed_pending_messages in the config\n"
|
"You may want to increase allowed_pending_messages in the config\n"
|
||||||
|
|
||||||
var prevInstance *Statsd
|
var prevInstance *Statsd
|
||||||
@@ -65,6 +66,8 @@ type Statsd struct {
|
|||||||
|
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
|
// drops tracks the number of dropped metrics.
|
||||||
|
drops int
|
||||||
|
|
||||||
// Channel for all incoming statsd packets
|
// Channel for all incoming statsd packets
|
||||||
in chan []byte
|
in chan []byte
|
||||||
@@ -291,7 +294,10 @@ func (s *Statsd) udpListen() error {
|
|||||||
select {
|
select {
|
||||||
case s.in <- bufCopy:
|
case s.in <- bufCopy:
|
||||||
default:
|
default:
|
||||||
log.Printf(dropwarn, string(buf[:n]))
|
s.drops++
|
||||||
|
if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 {
|
||||||
|
log.Printf(dropwarn, s.drops)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,6 +29,8 @@ type TcpListener struct {
|
|||||||
// is an available bool in accept, then we are below the maximum and can
|
// is an available bool in accept, then we are below the maximum and can
|
||||||
// accept the connection
|
// accept the connection
|
||||||
accept chan bool
|
accept chan bool
|
||||||
|
// drops tracks the number of dropped metrics.
|
||||||
|
drops int
|
||||||
|
|
||||||
// track the listener here so we can close it in Stop()
|
// track the listener here so we can close it in Stop()
|
||||||
listener *net.TCPListener
|
listener *net.TCPListener
|
||||||
@@ -39,7 +41,8 @@ type TcpListener struct {
|
|||||||
acc telegraf.Accumulator
|
acc telegraf.Accumulator
|
||||||
}
|
}
|
||||||
|
|
||||||
var dropwarn = "ERROR: Message queue full. Discarding metric [%s], " +
|
var dropwarn = "ERROR: tcp_listener message queue full. " +
|
||||||
|
"We have dropped %d messages so far. " +
|
||||||
"You may want to increase allowed_pending_messages in the config\n"
|
"You may want to increase allowed_pending_messages in the config\n"
|
||||||
|
|
||||||
const sampleConfig = `
|
const sampleConfig = `
|
||||||
@@ -212,7 +215,10 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) {
|
|||||||
select {
|
select {
|
||||||
case t.in <- bufCopy:
|
case t.in <- bufCopy:
|
||||||
default:
|
default:
|
||||||
log.Printf(dropwarn, scanner.Text())
|
t.drops++
|
||||||
|
if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 {
|
||||||
|
log.Printf(dropwarn, t.drops)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ type UdpListener struct {
|
|||||||
|
|
||||||
in chan []byte
|
in chan []byte
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
|
// drops tracks the number of dropped metrics.
|
||||||
|
drops int
|
||||||
|
|
||||||
parser parsers.Parser
|
parser parsers.Parser
|
||||||
|
|
||||||
@@ -38,7 +40,8 @@ type UdpListener struct {
|
|||||||
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
|
// https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
|
||||||
const UDP_MAX_PACKET_SIZE int = 64 * 1024
|
const UDP_MAX_PACKET_SIZE int = 64 * 1024
|
||||||
|
|
||||||
var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
|
var dropwarn = "ERROR: udp_listener message queue full. " +
|
||||||
|
"We have dropped %d messages so far. " +
|
||||||
"You may want to increase allowed_pending_messages in the config\n"
|
"You may want to increase allowed_pending_messages in the config\n"
|
||||||
|
|
||||||
const sampleConfig = `
|
const sampleConfig = `
|
||||||
@@ -125,7 +128,10 @@ func (u *UdpListener) udpListen() error {
|
|||||||
select {
|
select {
|
||||||
case u.in <- bufCopy:
|
case u.in <- bufCopy:
|
||||||
default:
|
default:
|
||||||
log.Printf(dropwarn, string(bufCopy))
|
u.drops++
|
||||||
|
if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 {
|
||||||
|
log.Printf(dropwarn, u.drops)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,9 +12,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/filter"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
)
|
)
|
||||||
@@ -26,7 +25,7 @@ type Varnish struct {
|
|||||||
Stats []string
|
Stats []string
|
||||||
Binary string
|
Binary string
|
||||||
|
|
||||||
filter glob.Glob
|
filter filter.Filter
|
||||||
run runner
|
run runner
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,13 +77,13 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error {
|
|||||||
if s.filter == nil {
|
if s.filter == nil {
|
||||||
var err error
|
var err error
|
||||||
if len(s.Stats) == 0 {
|
if len(s.Stats) == 0 {
|
||||||
s.filter, err = internal.CompileFilter(defaultStats)
|
s.filter, err = filter.CompileFilter(defaultStats)
|
||||||
} else {
|
} else {
|
||||||
// legacy support, change "all" -> "*":
|
// legacy support, change "all" -> "*":
|
||||||
if s.Stats[0] == "all" {
|
if s.Stats[0] == "all" {
|
||||||
s.Stats[0] = "*"
|
s.Stats[0] = "*"
|
||||||
}
|
}
|
||||||
s.filter, err = internal.CompileFilter(s.Stats)
|
s.filter, err = filter.CompileFilter(s.Stats)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -96,7 +96,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
|
|||||||
// Send data to a random server
|
// Send data to a random server
|
||||||
p := rand.Perm(len(g.conns))
|
p := rand.Perm(len(g.conns))
|
||||||
for _, n := range p {
|
for _, n := range p {
|
||||||
if _, e := fmt.Fprintf(g.conns[n], graphitePoints); e != nil {
|
if _, e := fmt.Fprint(g.conns[n], graphitePoints); e != nil {
|
||||||
// Error
|
// Error
|
||||||
log.Println("ERROR: " + err.Error())
|
log.Println("ERROR: " + err.Error())
|
||||||
// Let's try the next one
|
// Let's try the next one
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ type InfluxDB struct {
|
|||||||
Password string
|
Password string
|
||||||
Database string
|
Database string
|
||||||
UserAgent string
|
UserAgent string
|
||||||
Precision string
|
|
||||||
RetentionPolicy string
|
RetentionPolicy string
|
||||||
WriteConsistency string
|
WriteConsistency string
|
||||||
Timeout internal.Duration
|
Timeout internal.Duration
|
||||||
@@ -39,6 +38,9 @@ type InfluxDB struct {
|
|||||||
// Use SSL but skip chain & host verification
|
// Use SSL but skip chain & host verification
|
||||||
InsecureSkipVerify bool
|
InsecureSkipVerify bool
|
||||||
|
|
||||||
|
// Precision is only here for legacy support. It will be ignored.
|
||||||
|
Precision string
|
||||||
|
|
||||||
conns []client.Client
|
conns []client.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -50,12 +52,9 @@ var sampleConfig = `
|
|||||||
urls = ["http://localhost:8086"] # required
|
urls = ["http://localhost:8086"] # required
|
||||||
## The target database for metrics (telegraf will create it if not exists).
|
## The target database for metrics (telegraf will create it if not exists).
|
||||||
database = "telegraf" # required
|
database = "telegraf" # required
|
||||||
## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
|
||||||
## note: using "s" precision greatly improves InfluxDB compression.
|
|
||||||
precision = "s"
|
|
||||||
|
|
||||||
## Retention policy to write to.
|
## Retention policy to write to. Empty string writes to the default rp.
|
||||||
retention_policy = "default"
|
retention_policy = ""
|
||||||
## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
|
## Write consistency (clusters only), can be: "any", "one", "quorom", "all"
|
||||||
write_consistency = "any"
|
write_consistency = "any"
|
||||||
|
|
||||||
@@ -184,7 +183,6 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
|
|||||||
}
|
}
|
||||||
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
|
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
|
||||||
Database: i.Database,
|
Database: i.Database,
|
||||||
Precision: i.Precision,
|
|
||||||
RetentionPolicy: i.RetentionPolicy,
|
RetentionPolicy: i.RetentionPolicy,
|
||||||
WriteConsistency: i.WriteConsistency,
|
WriteConsistency: i.WriteConsistency,
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
|
|||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("Skipping integration test in short mode")
|
t.Skip("Skipping integration test in short mode")
|
||||||
}
|
}
|
||||||
|
now := time.Now()
|
||||||
pTesting = &PrometheusClient{Listen: "localhost:9127"}
|
pTesting = &PrometheusClient{Listen: "localhost:9127"}
|
||||||
err := pTesting.Start()
|
err := pTesting.Start()
|
||||||
time.Sleep(time.Millisecond * 200)
|
time.Sleep(time.Millisecond * 200)
|
||||||
@@ -30,11 +31,13 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
|
|||||||
pt1, _ := telegraf.NewMetric(
|
pt1, _ := telegraf.NewMetric(
|
||||||
"test_point_1",
|
"test_point_1",
|
||||||
tags,
|
tags,
|
||||||
map[string]interface{}{"value": 0.0})
|
map[string]interface{}{"value": 0.0},
|
||||||
|
now)
|
||||||
pt2, _ := telegraf.NewMetric(
|
pt2, _ := telegraf.NewMetric(
|
||||||
"test_point_2",
|
"test_point_2",
|
||||||
tags,
|
tags,
|
||||||
map[string]interface{}{"value": 1.0})
|
map[string]interface{}{"value": 1.0},
|
||||||
|
now)
|
||||||
var metrics = []telegraf.Metric{
|
var metrics = []telegraf.Metric{
|
||||||
pt1,
|
pt1,
|
||||||
pt2,
|
pt2,
|
||||||
@@ -63,11 +66,13 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
|
|||||||
pt3, _ := telegraf.NewMetric(
|
pt3, _ := telegraf.NewMetric(
|
||||||
"test_point_3",
|
"test_point_3",
|
||||||
tags,
|
tags,
|
||||||
map[string]interface{}{"value": 0.0})
|
map[string]interface{}{"value": 0.0},
|
||||||
|
now)
|
||||||
pt4, _ := telegraf.NewMetric(
|
pt4, _ := telegraf.NewMetric(
|
||||||
"test_point_4",
|
"test_point_4",
|
||||||
tags,
|
tags,
|
||||||
map[string]interface{}{"value": 1.0})
|
map[string]interface{}{"value": 1.0},
|
||||||
|
now)
|
||||||
metrics = []telegraf.Metric{
|
metrics = []telegraf.Metric{
|
||||||
pt3,
|
pt3,
|
||||||
pt4,
|
pt4,
|
||||||
|
|||||||
@@ -84,6 +84,14 @@ func (a *Accumulator) AddFields(
|
|||||||
a.Metrics = append(a.Metrics, p)
|
a.Metrics = append(a.Metrics, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Accumulator) SetPrecision(precision, interval time.Duration) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Accumulator) DisablePrecision() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func (a *Accumulator) Debug() bool {
|
func (a *Accumulator) Debug() bool {
|
||||||
// stub for implementing Accumulator interface.
|
// stub for implementing Accumulator interface.
|
||||||
return a.debug
|
return a.debug
|
||||||
|
|||||||
Reference in New Issue
Block a user