Compare commits

102 Commits — ga-version...1.1.0

SHA1: 8ecfe13bf8, a90a687d89, 5ef6fe1d85, f9aef06a3c, 105bb65f73, 16081b2d1a, e43cfc2fce, 137272afea, 2150510bd4, fc59757a1a,
0cfa0d419a, 522658bd07, b1a97e35b9, c66363cba5, 61269c3500, 393d129982, 80d4864844, f729fa990d, 662db7a944, c849b58de9,
097b1e09db, babd37bf35, 91f48e7ad5, a12bd878e0, a4e8f24b16, a65447d22e, b00ad65b08, a84ce5d5cb, 8ca4a50c18, 03b2984ac2,
9540a6532f, cace663bbf, acfdd15aa9, 78f544c0aa, 2175a72fcc, b03c1d9691, fead80844e, ef885eda62, 64a71263a1, 974221f0cf,
bccef2856d, 80df3f7634, e96f7a9b12, 2bbb6aa6f2, 1ff721ad84, 3e3b094270, 1f7a8fceef, b702a9758b, 3b607aa8ae, 4a4a6892f9,
56b627dfe2, 5c87b92976, dbcc312b0e, 2d842fefb8, d63e3c8cc4, 187a894fe9, ca55c4a55d, d627bdbbdb, 4f06f6b3d8, 7f0fe78615,
5913f7cb36, 8dc42ad9f2, 886bdd2ef2, 5a86a2ff26, 817d696628, 4ab0344ebf, 7b05170145, b48ad4b737, 9feb639bbd, ce5054c850,
c7834209d2, 78ced6bc30, ca8e512e5b, 573628dbdd, e477620dc5, 32268fb25b, 80391bfe1f, e19845c202, 52134555d6, e7e39df6a0,
055ef168ae, 2778b7be30, 953db51b2c, c043461f6c, ddc07f9ef8, 2cf1db0837, 17e6496830, 1d10eda84e, 9ea3dbeee8, 100501ba72,
f12368698b, 6b25a73629, 90c7475c68, 6648c101dd, 8d3285522c, b613405f42, e999298078, 0f0ab953f6, aaddbd153e, 9b2e2cc41f,
bc22309459, b6f81b538a
69 CHANGELOG.md
@@ -1,9 +1,30 @@
## v1.1 [unreleased]
## v1.2 [unreleased]

### Release Notes

### Features

### Bugfixes

## v1.1 [unreleased]

### Release Notes

- Telegraf now supports two new types of plugins: processors & aggregators.

- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
On most systems, the logs will be directed to the systemd journal and can be
accessed by `journalctl -u telegraf.service`. Consult the systemd journal
documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf#L70)
available in 1.1, which will allow users to easily configure telegraf to
continue sending logs to /var/log/telegraf/telegraf.log.

### Features

- [#1726](https://github.com/influxdata/telegraf/issues/1726): Processor & Aggregator plugin support.
- [#1861](https://github.com/influxdata/telegraf/pull/1861): Add tags in the graylog output plugin.
- [#1732](https://github.com/influxdata/telegraf/pull/1732): Telegraf systemd service, log to journal.
- [#1782](https://github.com/influxdata/telegraf/pull/1782): Allow numeric and non-string values for tag_keys.
- [#1694](https://github.com/influxdata/telegraf/pull/1694): Adding Gauge and Counter metric types.
- [#1606](https://github.com/influxdata/telegraf/pull/1606): Remove carriage returns from exec plugin output on Windows.
- [#1674](https://github.com/influxdata/telegraf/issues/1674): elasticsearch input: configurable timeout.

@@ -17,16 +38,58 @@

- [#1542](https://github.com/influxdata/telegraf/pull/1542): Add filestack webhook plugin.
- [#1599](https://github.com/influxdata/telegraf/pull/1599): Add server hostname for each docker measurement.
- [#1697](https://github.com/influxdata/telegraf/pull/1697): Add NATS output plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407): HTTP service listener input plugin.
- [#1407](https://github.com/influxdata/telegraf/pull/1407) & [#1915](https://github.com/influxdata/telegraf/pull/1915): HTTP service listener input plugin.
- [#1699](https://github.com/influxdata/telegraf/pull/1699): Add database blacklist option for Postgresql.
- [#1791](https://github.com/influxdata/telegraf/pull/1791): Add Docker container state metrics to Docker input plugin output.
- [#1755](https://github.com/influxdata/telegraf/issues/1755): Add support to SNMP for IP & MAC address conversion.
- [#1729](https://github.com/influxdata/telegraf/issues/1729): Add support to SNMP for OID index suffixes.
- [#1813](https://github.com/influxdata/telegraf/pull/1813): Change default arguments for SNMP plugin.
- [#1686](https://github.com/influxdata/telegraf/pull/1686): Mesos input plugin: very high-cardinality mesos-task metrics removed.
- [#1838](https://github.com/influxdata/telegraf/pull/1838): Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
- [#1700](https://github.com/influxdata/telegraf/pull/1700): HAProxy plugin socket glob matching.
- [#1847](https://github.com/influxdata/telegraf/pull/1847): Add Kubernetes plugin for retrieving pod metrics.

### Bugfixes

- [#1955](https://github.com/influxdata/telegraf/issues/1955): Fix NATS plugin reconnection logic.
- [#1936](https://github.com/influxdata/telegraf/issues/1936): Set required default values in udp_listener & tcp_listener.
- [#1926](https://github.com/influxdata/telegraf/issues/1926): Fix toml unmarshal panic in Duration objects.
- [#1746](https://github.com/influxdata/telegraf/issues/1746): Fix handling of non-string values for JSON keys listed in tag_keys.
- [#1628](https://github.com/influxdata/telegraf/issues/1628): Fix mongodb input panic on version 2.2.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1733](https://github.com/influxdata/telegraf/issues/1733): Fix statsd scientific notation parsing.
- [#1716](https://github.com/influxdata/telegraf/issues/1716): Fix sensors plugin `strconv.ParseFloat: parsing "": invalid syntax`.
- [#1530](https://github.com/influxdata/telegraf/issues/1530): Fix prometheus_client reload panic.
- [#1764](https://github.com/influxdata/telegraf/issues/1764): Fix kafka consumer panic when nil error is returned down errs channel.
- [#1768](https://github.com/influxdata/telegraf/pull/1768): Speed up statsd parsing.
- [#1751](https://github.com/influxdata/telegraf/issues/1751): Fix powerdns integer parse error handling.
- [#1752](https://github.com/influxdata/telegraf/issues/1752): Fix varnish plugin defaults not being used.
- [#1517](https://github.com/influxdata/telegraf/issues/1517): Fix windows glob paths.
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix issue loading config directory on windows.
- [#1772](https://github.com/influxdata/telegraf/pull/1772): Windows remote management interactive service fix.
- [#1702](https://github.com/influxdata/telegraf/issues/1702): sqlserver, fix issue when case sensitive collation is activated.
- [#1823](https://github.com/influxdata/telegraf/issues/1823): Fix huge allocations in http_listener when dealing with huge payloads.
- [#1833](https://github.com/influxdata/telegraf/issues/1833): Fix translating SNMP fields not in MIB.
- [#1835](https://github.com/influxdata/telegraf/issues/1835): Fix SNMP emitting empty fields.
- [#1854](https://github.com/influxdata/telegraf/pull/1853): SQL Server waitstats truncation bug.
- [#1810](https://github.com/influxdata/telegraf/issues/1810): Fix logparser common log format: numbers in ident.
- [#1793](https://github.com/influxdata/telegraf/pull/1793): Fix JSON Serialization in OpenTSDB output.
- [#1731](https://github.com/influxdata/telegraf/issues/1731): Fix Graphite template ordering, use most specific.
- [#1836](https://github.com/influxdata/telegraf/pull/1836): Fix snmp table field initialization for non-automatic table.
- [#1724](https://github.com/influxdata/telegraf/issues/1724): Fix cgroups path being parsed as metric.
- [#1886](https://github.com/influxdata/telegraf/issues/1886): Fix phpfpm fcgi client panic when URL does not exist.
- [#1344](https://github.com/influxdata/telegraf/issues/1344): Fix config file parse error logging.
- [#1771](https://github.com/influxdata/telegraf/issues/1771): Delete nil fields in the metric maker.
- [#870](https://github.com/influxdata/telegraf/issues/870): Fix MySQL special characters in DSN parsing.
- [#1742](https://github.com/influxdata/telegraf/issues/1742): Fix ping input odd timeout behavior.

## v1.0.1 [2016-09-26]

### Bugfixes

- [#1775](https://github.com/influxdata/telegraf/issues/1775): Prometheus output: Fix bug with multi-batch writes.
- [#1738](https://github.com/influxdata/telegraf/issues/1738): Fix unmarshal of influxdb metrics with null tags.
- [#1773](https://github.com/influxdata/telegraf/issues/1773): Add configurable timeout to influxdb input plugin.
- [#1785](https://github.com/influxdata/telegraf/pull/1785): Fix statsd no default value panic.

## v1.0 [2016-09-08]
190 CONTRIBUTING.md
@@ -2,7 +2,7 @@
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin; if it's an input plugin, it should be structured

@@ -16,8 +16,8 @@ for a good example.

## GoDoc

Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc
Public interfaces for inputs, outputs, processors, aggregators, metrics,
and the accumulator can be found on the GoDoc

[GoDoc](https://godoc.org/github.com/influxdata/telegraf)

@@ -46,7 +46,7 @@ and submit new inputs.

### Input Plugin Guidelines

* A plugin must conform to the `telegraf.Input` interface.
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the

@@ -177,7 +177,7 @@ similar constructs.

### Output Plugin Guidelines

* An output must conform to the `outputs.Output` interface.
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the

@@ -275,6 +275,186 @@ and `Stop()` methods.

* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.

## Processor Plugins

This section is for developers who want to create a new processor plugin.

### Processor Plugin Guidelines

* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
* Processors should call `processors.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file
(see the registration sketch after the example below).
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this processor does.

### Processor Example

```go
package printer

// printer.go

import (
    "fmt"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/processors"
)

type Printer struct {
}

var sampleConfig = `
`

func (p *Printer) SampleConfig() string {
    return sampleConfig
}

func (p *Printer) Description() string {
    return "Print all metrics that pass through this filter."
}

func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
    for _, metric := range in {
        fmt.Println(metric.String())
    }
    return in
}

func init() {
    processors.Add("printer", func() telegraf.Processor {
        return &Printer{}
    })
}
```
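Registration via `processors.Add` only takes effect when the plugin package is actually compiled into the binary, which is what the `all.go` entry above is for. A minimal sketch of that entry, assuming the blank-import pattern used by the `all.go` files:

```go
// plugins/processors/all/all.go (sketch)
package all

import (
    // The blank import runs the printer package's init(),
    // which registers the plugin via processors.Add.
    _ "github.com/influxdata/telegraf/plugins/processors/printer"
)
```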

## Aggregator Plugins

This section is for developers who want to create a new aggregator plugin.

### Aggregator Plugin Guidelines

* An aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
metric.
* When the `Reset()` function is called, all caches should be cleared.

### Aggregator Example

```go
package min

// min.go

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/aggregators"
)

type Min struct {
    // caches for metric fields, names, and tags
    fieldCache map[uint64]map[string]float64
    nameCache  map[uint64]string
    tagCache   map[uint64]map[string]string
}

func NewMin() telegraf.Aggregator {
    m := &Min{}
    m.Reset()
    return m
}

var sampleConfig = `
  ## period is the flush & clear interval of the aggregator.
  period = "30s"
  ## If true drop_original will drop the original metrics and
  ## only send aggregates.
  drop_original = false
`

func (m *Min) SampleConfig() string {
    return sampleConfig
}

func (m *Min) Description() string {
    return "Keep the aggregate min of each metric passing through."
}

func (m *Min) Add(in telegraf.Metric) {
    id := in.HashID()
    if _, ok := m.nameCache[id]; !ok {
        // hit an uncached metric, create caches for first time:
        m.nameCache[id] = in.Name()
        m.tagCache[id] = in.Tags()
        m.fieldCache[id] = make(map[string]float64)
        for k, v := range in.Fields() {
            if fv, ok := convert(v); ok {
                m.fieldCache[id][k] = fv
            }
        }
    } else {
        for k, v := range in.Fields() {
            if fv, ok := convert(v); ok {
                if _, ok := m.fieldCache[id][k]; !ok {
                    // hit an uncached field of a cached metric
                    m.fieldCache[id][k] = fv
                    continue
                }
                if fv < m.fieldCache[id][k] {
                    // set new minimum
                    m.fieldCache[id][k] = fv
                }
            }
        }
    }
}

func (m *Min) Push(acc telegraf.Accumulator) {
    for id := range m.nameCache {
        fields := map[string]interface{}{}
        for k, v := range m.fieldCache[id] {
            fields[k+"_min"] = v
        }
        acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
    }
}

func (m *Min) Reset() {
    m.fieldCache = make(map[uint64]map[string]float64)
    m.nameCache = make(map[uint64]string)
    m.tagCache = make(map[uint64]map[string]string)
}

func convert(in interface{}) (float64, bool) {
    switch v := in.(type) {
    case float64:
        return v, true
    case int64:
        return float64(v), true
    default:
        return 0, false
    }
}

func init() {
    aggregators.Add("min", func() telegraf.Aggregator {
        return NewMin()
    })
}
```

## Unit Tests

### Execute short tests
9 Godeps
@@ -19,7 +19,7 @@ github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e

@@ -27,8 +27,9 @@ github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720

@@ -47,7 +48,7 @@ github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2

@@ -55,7 +56,7 @@ github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
11 Makefile
@@ -1,4 +1,6 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse HEAD')
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else

@@ -13,17 +15,18 @@ windows: prepare-windows build-windows

# Only run the build (no dependency grabbing)
build:
	go install -ldflags "-X main.version=$(VERSION)" ./...
	go install -ldflags \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...

build-windows:
	GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
		"-X main.version=$(VERSION)" \
		"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go

build-for-docker:
	CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
		"-s -X main.version=$(VERSION)" \
		./cmd/telegraf/telegraf.go
		"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
		./cmd/telegraf/telegraf.go
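These `-X` flags only take effect if `main` declares matching package-level string variables. A minimal sketch of the receiving side, with variable names taken from the flags above and the file layout assumed:

```go
// cmd/telegraf/telegraf.go (sketch of the link-time variables)
package main

import "fmt"

// Zero-valued defaults; overwritten at link time by
// -ldflags "-X main.version=... -X main.commit=... -X main.branch=...".
var (
    version string
    commit  string
    branch  string
)

func main() {
    fmt.Printf("Telegraf %s (branch: %s, commit: %s)\n", version, branch, commit)
}
```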

# run package script
package:
88 README.md
@@ -20,12 +20,12 @@ new plugins.

### Linux deb and rpm Packages:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.x86_64.rpm

Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.armhf.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1.armhf.rpm

##### Package Instructions:

@@ -46,14 +46,14 @@ to use this repo to install & update telegraf.

### Linux tarballs:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_armhf.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_linux_armhf.tar.gz

### FreeBSD tarball:

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_freebsd_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_freebsd_amd64.tar.gz

### Ansible Role:

@@ -69,7 +69,7 @@ brew install telegraf

### Windows Binaries (EXPERIMENTAL)

Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.1_windows_amd64.zip

### From Source:

@@ -85,44 +85,42 @@ if you don't have it already. You also must build with golang version 1.5+.

## How to use it:

```console
$ telegraf -help
Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>

The flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -output-filter     filter the output plugins to enable, separator is :
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

See usage with:

```
telegraf --help
```

### Generate a telegraf config file:

```
telegraf config > telegraf.conf
```

### Generate config with only cpu input & influxdb output plugins defined

```
telegraf --input-filter cpu --output-filter influxdb config
```

### Run a single telegraf collection, outputting metrics to stdout

```
telegraf --config telegraf.conf -test
```

### Run telegraf with all plugins defined in config file

```
telegraf --config telegraf.conf
```

### Run telegraf, enabling the cpu & memory input, and influxdb output plugins

```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
```

## Configuration
@@ -2,9 +2,8 @@ package telegraf

import "time"

// Accumulator is an interface for "accumulating" metrics from input plugin(s).
// The metrics are sent down a channel shared between all input plugins and then
// flushed on the configured flush_interval.
// Accumulator is an interface for "accumulating" metrics from plugin(s).
// The metrics are sent down a channel shared between all plugins.
type Accumulator interface {
    // AddFields adds a metric to the accumulator with the given measurement
    // name, fields, and tags (and timestamp). If a timestamp is not provided,

@@ -29,12 +28,7 @@ type Accumulator interface {
        tags map[string]string,
        t ...time.Time)

    AddError(err error)

    Debug() bool
    SetDebug(enabled bool)

    SetPrecision(precision, interval time.Duration)

    DisablePrecision()
    AddError(err error)
}
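For orientation, an input plugin hands metrics to this interface from its `Gather` method; a minimal sketch (the plugin type, measurement name, and values are hypothetical):

```go
// Sketch of an input plugin feeding the Accumulator.
func (s *Example) Gather(acc telegraf.Accumulator) error {
    fields := map[string]interface{}{"value": 42.0}
    tags := map[string]string{"host": "example"}
    // The trailing timestamp is optional; the accumulator
    // falls back to time.Now() when it is omitted.
    acc.AddFields("example", fields, tags)
    return nil
}
```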

@@ -1,37 +1,40 @@
package agent

import (
    "fmt"
    "log"
    "math"
    "sync/atomic"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/models"
)

type MetricMaker interface {
    Name() string
    MakeMetric(
        measurement string,
        fields map[string]interface{},
        tags map[string]string,
        mType telegraf.ValueType,
        t time.Time,
    ) telegraf.Metric
}

func NewAccumulator(
    inputConfig *models.InputConfig,
    maker MetricMaker,
    metrics chan telegraf.Metric,
) *accumulator {
    acc := accumulator{}
    acc.metrics = metrics
    acc.inputConfig = inputConfig
    acc.precision = time.Nanosecond
    acc := accumulator{
        maker:     maker,
        metrics:   metrics,
        precision: time.Nanosecond,
    }
    return &acc
}

type accumulator struct {
    metrics chan telegraf.Metric

    defaultTags map[string]string

    debug bool
    // print every point added to the accumulator
    trace bool

    inputConfig *models.InputConfig
    maker MetricMaker

    precision time.Duration

@@ -44,7 +47,7 @@ func (ac *accumulator) AddFields(
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

@@ -55,7 +58,7 @@ func (ac *accumulator) AddGauge(
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

@@ -66,114 +69,11 @@ func (ac *accumulator) AddCounter(
    tags map[string]string,
    t ...time.Time,
) {
    if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil {
    if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
        ac.metrics <- m
    }
}

// makeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (ac *accumulator) makeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t ...time.Time,
) telegraf.Metric {
    if len(fields) == 0 || len(measurement) == 0 {
        return nil
    }
    if tags == nil {
        tags = make(map[string]string)
    }

    // Override measurement name if set
    if len(ac.inputConfig.NameOverride) != 0 {
        measurement = ac.inputConfig.NameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(ac.inputConfig.MeasurementPrefix) != 0 {
        measurement = ac.inputConfig.MeasurementPrefix + measurement
    }
    if len(ac.inputConfig.MeasurementSuffix) != 0 {
        measurement = measurement + ac.inputConfig.MeasurementSuffix
    }

    // Apply plugin-wide tags if set
    for k, v := range ac.inputConfig.Tags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    // Apply the metric filter(s)
    if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
        return nil
    }

    for k, v := range fields {
        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                fields[k] = int64(val)
            } else {
                fields[k] = int64(9223372036854775807)
            }
            continue
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                delete(fields, k)
                continue
            }
        }

        fields[k] = v
    }

    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }
    timestamp = timestamp.Round(ac.precision)

    var m telegraf.Metric
    var err error
    switch mType {
    case telegraf.Counter:
        m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
    case telegraf.Gauge:
        m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
    default:
        m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
    }
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return nil
    }

    if ac.trace {
        fmt.Println("> " + m.String())
    }

    return m
}

// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {

@@ -182,23 +82,7 @@ func (ac *accumulator) AddError(err error) {
    }
    atomic.AddUint64(&ac.errCount, 1)
    //TODO suppress/throttle consecutive duplicate errors?
    log.Printf("ERROR in input [%s]: %s", ac.inputConfig.Name, err)
}

func (ac *accumulator) Debug() bool {
    return ac.debug
}

func (ac *accumulator) SetDebug(debug bool) {
    ac.debug = debug
}

func (ac *accumulator) Trace() bool {
    return ac.trace
}

func (ac *accumulator) SetTrace(trace bool) {
    ac.trace = trace
    log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
}

// SetPrecision takes two time.Duration objects. If the first is non-zero,

@@ -222,17 +106,12 @@ func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
    }
}

func (ac *accumulator) DisablePrecision() {
    ac.precision = time.Nanosecond
}

func (ac *accumulator) setDefaultTags(tags map[string]string) {
    ac.defaultTags = tags
}

func (ac *accumulator) addDefaultTag(key, value string) {
    if ac.defaultTags == nil {
        ac.defaultTags = make(map[string]string)
func (ac accumulator) getTime(t []time.Time) time.Time {
    var timestamp time.Time
    if len(t) > 0 {
        timestamp = t[0]
    } else {
        timestamp = time.Now()
    }
    ac.defaultTags[key] = value
    return timestamp.Round(ac.precision)
}
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal/models"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
@@ -33,97 +30,142 @@ func TestAdd(t *testing.T) {
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
testm := <-metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
testm = <-metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
testm = <-metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddGauge(t *testing.T) {
|
||||
a := accumulator{}
|
||||
func TestAddFields(t *testing.T) {
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
fields := map[string]interface{}{
|
||||
"usage": float64(99),
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
testm := <-metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||
assert.Contains(t, actual, "acctest usage=99")
|
||||
|
||||
testm = <-a.metrics
|
||||
testm = <-metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||
assert.Contains(t, actual, "acctest,acc=test usage=99")
|
||||
|
||||
testm = <-a.metrics
|
||||
testm = <-metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
|
||||
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
|
||||
actual)
|
||||
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||
}
|
||||
|
||||
func TestAddCounter(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
func TestAccAddError(t *testing.T) {
|
||||
errBuf := bytes.NewBuffer(nil)
|
||||
log.SetOutput(errBuf)
|
||||
defer log.SetOutput(os.Stderr)
|
||||
|
||||
a.AddCounter("acctest",
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.AddError(fmt.Errorf("foo"))
|
||||
a.AddError(fmt.Errorf("bar"))
|
||||
a.AddError(fmt.Errorf("baz"))
|
||||
|
||||
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
|
||||
assert.EqualValues(t, 3, a.errCount)
|
||||
require.Len(t, errs, 4) // 4 because of trailing newline
|
||||
assert.Contains(t, string(errs[0]), "TestPlugin")
|
||||
assert.Contains(t, string(errs[0]), "foo")
|
||||
assert.Contains(t, string(errs[1]), "TestPlugin")
|
||||
assert.Contains(t, string(errs[1]), "bar")
|
||||
assert.Contains(t, string(errs[2]), "TestPlugin")
|
||||
assert.Contains(t, string(errs[2]), "baz")
|
||||
}
|
||||
|
||||
func TestAddNoIntervalWithPrecision(t *testing.T) {
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
a.SetPrecision(0, time.Second)
|
||||
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddCounter("acctest",
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddCounter("acctest",
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddDisablePrecision(t *testing.T) {
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.SetPrecision(time.Nanosecond, 0)
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||
actual)
|
||||
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||
}
|
||||
|
||||
func TestAddNoPrecisionWithInterval(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.SetPrecision(0, time.Second)
|
||||
a.AddFields("acctest",
|
||||
@@ -151,79 +193,11 @@ func TestAddNoPrecisionWithInterval(t *testing.T) {
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddNoIntervalWithPrecision(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
a.SetPrecision(time.Second, time.Millisecond)
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddDisablePrecision(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
a.SetPrecision(time.Second, time.Millisecond)
|
||||
a.DisablePrecision()
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestDifferentPrecisions(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.SetPrecision(0, time.Second)
|
||||
a.AddFields("acctest",
|
||||
@@ -266,349 +240,100 @@ func TestDifferentPrecisions(t *testing.T) {
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddDefaultTags(t *testing.T) {
|
||||
a := accumulator{}
|
||||
a.addDefaultTag("default", "tag")
|
||||
func TestAddGauge(t *testing.T) {
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
metrics := make(chan telegraf.Metric, 10)
|
||||
defer close(metrics)
|
||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||
|
||||
a.AddFields("acctest",
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{})
|
||||
a.AddFields("acctest",
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
a.AddGauge("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest,default=tag value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddFields(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage": float64(99),
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest usage=99")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test usage=99")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
// Test that all Inf fields get dropped, and not added to metrics channel
|
||||
func TestAddInfFields(t *testing.T) {
|
||||
inf := math.Inf(1)
|
||||
ninf := math.Inf(-1)
|
||||
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage": inf,
|
||||
"nusage": ninf,
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
assert.Len(t, a.metrics, 0)
|
||||
|
||||
// test that non-inf fields are kept and not dropped
|
||||
fields["notinf"] = float64(100)
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest notinf=100")
|
||||
}
|
||||
|
||||
// Test that nan fields are dropped and not added
|
||||
func TestAddNaNFields(t *testing.T) {
|
||||
nan := math.NaN()
|
||||
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage": nan,
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
assert.Len(t, a.metrics, 0)
|
||||
|
||||
// test that non-nan fields are kept and not dropped
|
||||
fields["notnan"] = float64(100)
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest notnan=100")
|
||||
}
|
||||
|
||||
func TestAddUint64Fields(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage": uint64(99),
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest usage=99i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test usage=99i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddUint64Overflow(t *testing.T) {
|
||||
a := accumulator{}
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"usage": uint64(9223372036854775808),
|
||||
}
|
||||
a.AddFields("acctest", fields, map[string]string{})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest usage=9223372036854775807i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddInts(t *testing.T) {
|
||||
a := accumulator{}
|
||||
a.addDefaultTag("default", "tag")
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": int32(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": int64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest,default=tag value=101i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")
|
||||
|
||||
testm = <-a.metrics
|
||||
actual = testm.String()
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
|
||||
actual)
|
||||
}
|
||||
|
||||
func TestAddFloats(t *testing.T) {
|
||||
a := accumulator{}
|
||||
a.addDefaultTag("default", "tag")
|
||||
now := time.Now()
|
||||
a.metrics = make(chan telegraf.Metric, 10)
|
||||
defer close(a.metrics)
|
||||
a.inputConfig = &models.InputConfig{}
|
||||
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float32(101)},
|
||||
map[string]string{"acc": "test"})
|
||||
a.AddFields("acctest",
|
||||
map[string]interface{}{"value": float64(101)},
|
||||
map[string]string{"acc": "test"}, now)
|
||||
|
||||
testm := <-a.metrics
|
||||
actual := testm.String()
|
||||
assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
        actual)
}

func TestAddStrings(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.AddFields("acctest",
        map[string]interface{}{"value": "test"},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": "foo"},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
        actual)
}

func TestAddBools(t *testing.T) {
    a := accumulator{}
    a.addDefaultTag("default", "tag")
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    a.inputConfig = &models.InputConfig{}

    a.AddFields("acctest",
        map[string]interface{}{"value": true}, map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")

    testm = <-a.metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
        actual)
}

// Test that tag filters get applied to metrics.
func TestAccFilterTags(t *testing.T) {
    a := accumulator{}
    now := time.Now()
    a.metrics = make(chan telegraf.Metric, 10)
    defer close(a.metrics)
    filter := models.Filter{
        TagExclude: []string{"acc"},
    }
    assert.NoError(t, filter.Compile())
    a.inputConfig = &models.InputConfig{}
    a.inputConfig.Filter = filter

    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddFields("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    testm := <-a.metrics
    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Gauge)

    testm = <-a.metrics
    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Gauge)
}

func TestAccAddError(t *testing.T) {
    errBuf := bytes.NewBuffer(nil)
    log.SetOutput(errBuf)
    defer log.SetOutput(os.Stderr)
func TestAddCounter(t *testing.T) {
    now := time.Now()
    metrics := make(chan telegraf.Metric, 10)
    defer close(metrics)
    a := NewAccumulator(&TestMetricMaker{}, metrics)

    a := accumulator{}
    a.inputConfig = &models.InputConfig{}
    a.inputConfig.Name = "mock_plugin"
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"})
    a.AddCounter("acctest",
        map[string]interface{}{"value": float64(101)},
        map[string]string{"acc": "test"}, now)

    a.AddError(fmt.Errorf("foo"))
    a.AddError(fmt.Errorf("bar"))
    a.AddError(fmt.Errorf("baz"))
    testm := <-metrics
    actual := testm.String()
    assert.Contains(t, actual, "acctest value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
    assert.EqualValues(t, 3, a.errCount)
    require.Len(t, errs, 4) // 4 because of trailing newline
    assert.Contains(t, string(errs[0]), "mock_plugin")
    assert.Contains(t, string(errs[0]), "foo")
    assert.Contains(t, string(errs[1]), "mock_plugin")
    assert.Contains(t, string(errs[1]), "bar")
    assert.Contains(t, string(errs[2]), "mock_plugin")
    assert.Contains(t, string(errs[2]), "baz")
    testm = <-metrics
    actual = testm.String()
    assert.Contains(t, actual, "acctest,acc=test value=101")
    assert.Equal(t, testm.Type(), telegraf.Counter)

    testm = <-metrics
    actual = testm.String()
    assert.Equal(t,
        fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
        actual)
    assert.Equal(t, testm.Type(), telegraf.Counter)
}

type TestMetricMaker struct {
}

func (tm *TestMetricMaker) Name() string {
    return "TestPlugin"
}
func (tm *TestMetricMaker) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    switch mType {
    case telegraf.Untyped:
        if m, err := telegraf.NewMetric(measurement, tags, fields, t); err == nil {
            return m
        }
    case telegraf.Counter:
        if m, err := telegraf.NewCounterMetric(measurement, tags, fields, t); err == nil {
            return m
        }
    case telegraf.Gauge:
        if m, err := telegraf.NewGaugeMetric(measurement, tags, fields, t); err == nil {
            return m
        }
    }
    return nil
}

259
agent/agent.go
@@ -49,18 +49,16 @@ func (a *Agent) Connect() error {
switch ot := o.Output.(type) {
case telegraf.ServiceOutput:
    if err := ot.Start(); err != nil {
        log.Printf("Service for output %s failed to start, exiting\n%s\n",
        log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
            o.Name, err.Error())
        return err
    }
}

if a.Config.Agent.Debug {
    log.Printf("Attempting connection to output: %s\n", o.Name)
}
log.Printf("D! Attempting connection to output: %s\n", o.Name)
err := o.Output.Connect()
if err != nil {
    log.Printf("Failed to connect to output %s, retrying in 15s, "+
    log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
        "error was '%s' \n", o.Name, err)
    time.Sleep(15 * time.Second)
    err = o.Output.Connect()
@@ -68,9 +66,7 @@ func (a *Agent) Connect() error {
        return err
    }
}
if a.Config.Agent.Debug {
    log.Printf("Successfully connected to output: %s\n", o.Name)
}
log.Printf("D! Successfully connected to output: %s\n", o.Name)
}
return nil
}
@@ -92,9 +88,9 @@ func panicRecover(input *models.RunningInput) {
if err := recover(); err != nil {
    trace := make([]byte, 2048)
    runtime.Stack(trace, true)
    log.Printf("FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
        input.Name, err, trace)
    log.Println("PLEASE REPORT THIS PANIC ON GITHUB with " +
    log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
        input.Name(), err, trace)
    log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
        "stack trace, configuration, and OS information: " +
        "https://github.com/influxdata/telegraf/issues/new")
}
@@ -107,20 +103,18 @@ func (a *Agent) gatherer(
input *models.RunningInput,
interval time.Duration,
metricC chan telegraf.Metric,
) error {
) {
defer panicRecover(input)

ticker := time.NewTicker(interval)
defer ticker.Stop()

for {
    var outerr error

    acc := NewAccumulator(input.Config, metricC)
    acc.SetDebug(a.Config.Agent.Debug)
    acc := NewAccumulator(input, metricC)
    acc.SetPrecision(a.Config.Agent.Precision.Duration,
        a.Config.Agent.Interval.Duration)
    acc.setDefaultTags(a.Config.Tags)
    input.SetDebug(a.Config.Agent.Debug)
    input.SetDefaultTags(a.Config.Tags)

    internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)

@@ -128,17 +122,12 @@ func (a *Agent) gatherer(
    gatherWithTimeout(shutdown, input, acc, interval)
    elapsed := time.Since(start)

    if outerr != nil {
        return outerr
    }
    if a.Config.Agent.Debug {
        log.Printf("Input [%s] gathered metrics, (%s interval) in %s\n",
            input.Name, interval, elapsed)
    }
    log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
        input.Name(), interval, elapsed)

    select {
    case <-shutdown:
        return nil
        return
    case <-ticker.C:
        continue
    }
@@ -167,13 +156,13 @@ func gatherWithTimeout(
select {
case err := <-done:
    if err != nil {
        log.Printf("ERROR in input [%s]: %s", input.Name, err)
        log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
    }
    return
case <-ticker.C:
    log.Printf("ERROR: input [%s] took longer to collect than "+
    log.Printf("E! ERROR: input [%s] took longer to collect than "+
        "collection interval (%s)",
        input.Name, timeout)
        input.Name(), timeout)
    continue
case <-shutdown:
    return
@@ -201,13 +190,13 @@ func (a *Agent) Test() error {
}()

for _, input := range a.Config.Inputs {
    acc := NewAccumulator(input.Config, metricC)
    acc.SetTrace(true)
    acc := NewAccumulator(input, metricC)
    acc.SetPrecision(a.Config.Agent.Precision.Duration,
        a.Config.Agent.Interval.Duration)
    acc.setDefaultTags(a.Config.Tags)
    input.SetTrace(true)
    input.SetDefaultTags(a.Config.Tags)

    fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
    fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
    if input.Config.Interval != 0 {
        fmt.Printf("* Interval: %s\n", input.Config.Interval)
    }
@@ -221,10 +210,10 @@ func (a *Agent) Test() error {

    // Special instructions for some inputs. cpu, for example, needs to be
    // run twice in order to return cpu usage percentages.
    switch input.Name {
    switch input.Name() {
    case "cpu", "mongodb", "procstat":
        time.Sleep(500 * time.Millisecond)
        fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
        fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
        if err := input.Input.Gather(acc); err != nil {
            return err
        }
@@ -244,7 +233,7 @@ func (a *Agent) flush() {
    defer wg.Done()
    err := output.Write()
    if err != nil {
        log.Printf("Error writing to output [%s]: %s\n",
        log.Printf("E! Error writing to output [%s]: %s\n",
            output.Name, err.Error())
    }
}(o)
@@ -257,31 +246,146 @@ func (a *Agent) flush() {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
    // Inelegant, but this sleep is to allow the Gather threads to run, so that
    // the flusher will flush after metrics are collected.
    time.Sleep(time.Millisecond * 200)
    time.Sleep(time.Millisecond * 300)

    // create an output metric channel and a goroutine that continuously passes
    // each metric onto the output plugins & aggregators.
    outMetricC := make(chan telegraf.Metric, 100)
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case <-shutdown:
                if len(outMetricC) > 0 {
                    // keep going until outMetricC is flushed
                    continue
                }
                return
            case m := <-outMetricC:
                // if dropOriginal is set to true, then we will only send this
                // metric to the aggregators, not the outputs.
                var dropOriginal bool
                if !m.IsAggregate() {
                    for _, agg := range a.Config.Aggregators {
                        if ok := agg.Add(copyMetric(m)); ok {
                            dropOriginal = true
                        }
                    }
                }
                if !dropOriginal {
                    for i, o := range a.Config.Outputs {
                        if i == len(a.Config.Outputs)-1 {
                            o.AddMetric(m)
                        } else {
                            o.AddMetric(copyMetric(m))
                        }
                    }
                }
            }
        }
    }()

    ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)

    for {
        select {
        case <-shutdown:
            log.Println("Hang on, flushing any cached metrics before shutdown")
            log.Println("I! Hang on, flushing any cached metrics before shutdown")
            // wait for outMetricC to get flushed before flushing outputs
            wg.Wait()
            a.flush()
            return nil
        case <-ticker.C:
            internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
            a.flush()
        case m := <-metricC:
            for i, o := range a.Config.Outputs {
                if i == len(a.Config.Outputs)-1 {
                    o.AddMetric(m)
                } else {
                    o.AddMetric(copyMetric(m))
                }
            }
        case metric := <-metricC:
            // NOTE potential bottleneck here as we put each metric through the
            // processors serially.
            mS := []telegraf.Metric{metric}
            for _, processor := range a.Config.Processors {
                mS = processor.Apply(mS...)
            }
            for _, m := range mS {
                outMetricC <- m
            }
        }
    }
}

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
        "Flush Interval:%s \n",
        a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

    // channel shared between all input threads for accumulating metrics
    metricC := make(chan telegraf.Metric, 100)

    // Start all ServicePlugins
    for _, input := range a.Config.Inputs {
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input, metricC)
            // Service input plugins should set the precision of their
            // own metrics.
            acc.SetPrecision(time.Nanosecond, 0)
            input.SetDefaultTags(a.Config.Tags)
            if err := p.Start(acc); err != nil {
                log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
                    input.Name(), err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, metricC); err != nil {
            log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    wg.Add(len(a.Config.Aggregators))
    for _, aggregator := range a.Config.Aggregators {
        go func(agg *models.RunningAggregator) {
            defer wg.Done()
            acc := NewAccumulator(agg, metricC)
            acc.SetPrecision(a.Config.Agent.Precision.Duration,
                a.Config.Agent.Interval.Duration)
            agg.Run(acc, shutdown)
        }(aggregator)
    }

    wg.Add(len(a.Config.Inputs))
    for _, input := range a.Config.Inputs {
        interval := a.Config.Agent.Interval.Duration
        // overwrite global interval if this plugin has its own.
        if input.Config.Interval != 0 {
            interval = input.Config.Interval
        }
        go func(in *models.RunningInput, interv time.Duration) {
            defer wg.Done()
            a.gatherer(shutdown, in, interv, metricC)
        }(input, interval)
    }

    wg.Wait()
    return nil
}

func copyMetric(m telegraf.Metric) telegraf.Metric {
    t := time.Time(m.Time())

@@ -297,68 +401,3 @@ func copyMetric(m telegraf.Metric) telegraf.Metric {
    out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
    return out
}

// Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error {
    var wg sync.WaitGroup

    log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+
        "Flush Interval:%s \n",
        a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet,
        a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

    // channel shared between all input threads for accumulating metrics
    metricC := make(chan telegraf.Metric, 10000)

    for _, input := range a.Config.Inputs {
        // Start service of any ServicePlugins
        switch p := input.Input.(type) {
        case telegraf.ServiceInput:
            acc := NewAccumulator(input.Config, metricC)
            acc.SetDebug(a.Config.Agent.Debug)
            // Service input plugins should set the precision of their
            // own metrics.
            acc.DisablePrecision()
            acc.setDefaultTags(a.Config.Tags)
            if err := p.Start(acc); err != nil {
                log.Printf("Service for input %s failed to start, exiting\n%s\n",
                    input.Name, err.Error())
                return err
            }
            defer p.Stop()
        }
    }

    // Round collection to nearest interval by sleeping
    if a.Config.Agent.RoundInterval {
        i := int64(a.Config.Agent.Interval.Duration)
        time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := a.flusher(shutdown, metricC); err != nil {
            log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
            close(shutdown)
        }
    }()

    wg.Add(len(a.Config.Inputs))
    for _, input := range a.Config.Inputs {
        interval := a.Config.Agent.Interval.Duration
        // overwrite global interval if this plugin has its own.
        if input.Config.Interval != 0 {
            interval = input.Config.Interval
        }
        go func(in *models.RunningInput, interv time.Duration) {
            defer wg.Done()
            if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
                log.Printf(err.Error())
            }
        }(input, interval)
    }

    wg.Wait()
    return nil
}
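
The new flusher above pushes each collected metric through `processor.Apply` one plugin at a time before anything reaches the outputs. As a rough sketch of the contract that loop relies on (the `Processor` interface itself is not shown in this changeset, so its exact shape here is an assumption), a pass-through processor could look like this:

```go
package processors

import (
    "fmt"

    "github.com/influxdata/telegraf"
)

// Printer is a sketch of a pass-through processor; the method set is
// inferred from the processor.Apply(mS...) call in the flusher above.
type Printer struct{}

func (p *Printer) SampleConfig() string { return "" }

func (p *Printer) Description() string {
    return "Print all metrics that pass through this filter."
}

// Apply receives a batch of metrics and returns the batch to forward.
// Returning fewer (or more) metrics is how a processor drops or fans
// out points; returning the input unchanged makes it a pure tap.
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
    for _, m := range in {
        fmt.Println(m.String())
    }
    return in
}
```

Chaining works because each plugin's return value becomes the next plugin's input, which is also why the flusher flags this serial loop as a potential bottleneck.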

22
aggregator.go
Normal file
@@ -0,0 +1,22 @@
package telegraf

// Aggregator is an interface for implementing an Aggregator plugin.
// The RunningAggregator wraps this interface and guarantees that
// Add, Push, and Reset cannot be called concurrently, so locking is not
// required when implementing an Aggregator plugin.
type Aggregator interface {
    // SampleConfig returns the default configuration of the Input.
    SampleConfig() string

    // Description returns a one-sentence description of the Input.
    Description() string

    // Add the metric to the aggregator.
    Add(in Metric)

    // Push pushes the current aggregates to the accumulator.
    Push(acc Accumulator)

    // Reset resets the aggregator's caches and aggregates.
    Reset()
}
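
Because the RunningAggregator serializes `Add`, `Push`, and `Reset`, an implementation can keep plain, unlocked state. A minimal count-only sketch against this interface (illustrative only; the `minmax` plugin added in this release is the real reference implementation):

```go
package aggregators

import "github.com/influxdata/telegraf"

// counter is a toy Aggregator that counts metrics per measurement name.
// No mutex is needed: RunningAggregator guarantees Add, Push, and Reset
// are never called concurrently (see the interface comment above).
type counter struct {
    counts map[string]int64
}

func (c *counter) SampleConfig() string { return "" }

func (c *counter) Description() string {
    return "Count metrics per measurement name."
}

// Add records one metric into the current aggregation window.
func (c *counter) Add(in telegraf.Metric) {
    if c.counts == nil {
        c.counts = make(map[string]int64)
    }
    c.counts[in.Name()]++
}

// Push emits the aggregates accumulated since the last Reset.
func (c *counter) Push(acc telegraf.Accumulator) {
    for name, n := range c.counts {
        acc.AddFields(name+"_count", map[string]interface{}{"count": n}, nil)
    }
}

// Reset clears the window; called after each period flush.
func (c *counter) Reset() {
    c.counts = make(map[string]int64)
}
```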

circle.yml
@@ -4,17 +4,14 @@ machine:
  post:
    - sudo service zookeeper stop
    - go version
    - go version | grep 1.7.1 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz
    - go version | grep 1.7.3 || sudo rm -rf /usr/local/go
    - wget https://storage.googleapis.com/golang/go1.7.3.linux-amd64.tar.gz
    - sudo tar -C /usr/local -xzf go1.7.3.linux-amd64.tar.gz
    - go version

dependencies:
  override:
    - docker info
  post:
    - gem install fpm
    - sudo apt-get install -y rpm python-boto

test:
  override:
cmd/telegraf/telegraf.go
@@ -12,15 +12,18 @@ import (

    "github.com/influxdata/telegraf/agent"
    "github.com/influxdata/telegraf/internal/config"
    "github.com/influxdata/telegraf/logger"
    _ "github.com/influxdata/telegraf/plugins/aggregators/all"
    "github.com/influxdata/telegraf/plugins/inputs"
    _ "github.com/influxdata/telegraf/plugins/inputs/all"
    "github.com/influxdata/telegraf/plugins/outputs"
    _ "github.com/influxdata/telegraf/plugins/outputs/all"
    _ "github.com/influxdata/telegraf/plugins/processors/all"
    "github.com/kardianos/service"
)

var fDebug = flag.Bool("debug", false,
    "show metrics as they're generated to stdout")
    "turn on debug logging")
var fQuiet = flag.Bool("quiet", false,
    "run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -39,6 +42,10 @@ var fOutputFilters = flag.String("output-filter", "",
    "filter the outputs to enable, separator is :")
var fOutputList = flag.Bool("output-list", false,
    "print available output plugins.")
var fAggregatorFilters = flag.String("aggregator-filter", "",
    "filter the aggregators to enable, separator is :")
var fProcessorFilters = flag.String("processor-filter", "",
    "filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
    "print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
@@ -52,59 +59,57 @@ var (
    branch string
)

func init() {
    // If commit or branch are not set, make that clear.
    if commit == "" {
        commit = "unknown"
    }
    if branch == "" {
        branch = "unknown"
    }
}

const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.

Usage:

  telegraf <flags>
  telegraf [commands|flags]

The flags are:
The commands & flags are:

  -config <file>     configuration file to load
  -test              gather metrics once, print them to stdout, and exit
  -sample-config     print out full sample configuration to stdout
  -config-directory  directory containing additional *.conf files
  -input-filter      filter the input plugins to enable, separator is :
  -input-list        print all the plugins inputs
  -output-filter     filter the output plugins to enable, separator is :
  -output-list       print all the available outputs
  -usage             print usage for a plugin, ie, 'telegraf -usage mysql'
  -debug             print metrics as they're generated to stdout
  -quiet             run in quiet mode
  -version           print the version to stdout
  -service           Control the service, ie, 'telegraf -service install (windows only)'
  config             print out full sample configuration to stdout
  version            print the version to stdout

In addition to the -config flag, telegraf will also load the config file from
an environment variable or default location. Precedence is:
  1. -config flag
  2. $TELEGRAF_CONFIG_PATH environment variable
  3. $HOME/.telegraf/telegraf.conf
  4. /etc/telegraf/telegraf.conf

  --config <file>     configuration file to load
  --test              gather metrics once, print them to stdout, and exit
  --config-directory  directory containing additional *.conf files
  --input-filter      filter the input plugins to enable, separator is :
  --output-filter     filter the output plugins to enable, separator is :
  --usage             print usage for a plugin, ie, 'telegraf --usage mysql'
  --debug             print metrics as they're generated to stdout
  --quiet             run in quiet mode

Examples:

  # generate a telegraf config file:
  telegraf -sample-config > telegraf.conf
  telegraf config > telegraf.conf

  # generate config with only cpu input & influxdb output plugins defined
  telegraf -sample-config -input-filter cpu -output-filter influxdb
  telegraf --input-filter cpu --output-filter influxdb config

  # run a single telegraf collection, outputting metrics to stdout
  telegraf -config telegraf.conf -test
  telegraf --config telegraf.conf -test

  # run telegraf with all plugins defined in config file
  telegraf -config telegraf.conf
  telegraf --config telegraf.conf

  # run telegraf, enabling the cpu & memory input, and influxdb output plugins
  telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
  telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
`

var logger service.Logger

var stop chan struct{}

var srvc service.Service
var svcConfig *service.Config

type program struct{}

@@ -119,7 +124,6 @@ func reloadLoop(stop chan struct{}, s service.Service) {
    reload <- true
    for <-reload {
        reload <- false
        flag.Usage = func() { usageExit(0) }
        flag.Parse()
        args := flag.Args()

@@ -133,15 +137,29 @@ func reloadLoop(stop chan struct{}, s service.Service) {
            outputFilter := strings.TrimSpace(*fOutputFilters)
            outputFilters = strings.Split(":"+outputFilter+":", ":")
        }
        var aggregatorFilters []string
        if *fAggregatorFilters != "" {
            aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
            aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
        }
        var processorFilters []string
        if *fProcessorFilters != "" {
            processorFilter := strings.TrimSpace(*fProcessorFilters)
            processorFilters = strings.Split(":"+processorFilter+":", ":")
        }

        if len(args) > 0 {
            switch args[0] {
            case "version":
                v := fmt.Sprintf("Telegraf - version %s", version)
                fmt.Println(v)
                fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
                return
            case "config":
                config.PrintSampleConfig(inputFilters, outputFilters)
                config.PrintSampleConfig(
                    inputFilters,
                    outputFilters,
                    aggregatorFilters,
                    processorFilters,
                )
                return
            }
        }
@@ -161,28 +179,23 @@ func reloadLoop(stop chan struct{}, s service.Service) {
        }
        return
    case *fVersion:
        v := fmt.Sprintf("Telegraf - version %s", version)
        fmt.Println(v)
        fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
        return
    case *fSampleConfig:
        config.PrintSampleConfig(inputFilters, outputFilters)
        config.PrintSampleConfig(
            inputFilters,
            outputFilters,
            aggregatorFilters,
            processorFilters,
        )
        return
    case *fUsage != "":
        if err := config.PrintInputConfig(*fUsage); err != nil {
            if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
                log.Fatalf("%s and %s", err, err2)
                log.Fatalf("E! %s and %s", err, err2)
            }
        }
        return
    case *fService != "" && runtime.GOOS == "windows":
        if *fConfig != "" {
            (*svcConfig).Arguments = []string{"-config", *fConfig}
        }
        err := service.Control(s, *fService)
        if err != nil {
            log.Fatal(err)
        }
        return
    }

    // If no other options are specified, load the config file and run.
@@ -191,47 +204,45 @@ func reloadLoop(stop chan struct{}, s service.Service) {
    c.InputFilters = inputFilters
    err := c.LoadConfig(*fConfig)
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
        log.Fatal("E! " + err.Error())
    }

    if *fConfigDirectory != "" {
        err = c.LoadDirectory(*fConfigDirectory)
        if err != nil {
            log.Fatal(err)
            log.Fatal("E! " + err.Error())
        }
    }
    if len(c.Outputs) == 0 {
        log.Fatalf("Error: no outputs found, did you provide a valid config file?")
        log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
    }
    if len(c.Inputs) == 0 {
        log.Fatalf("Error: no inputs found, did you provide a valid config file?")
        log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
    }

    ag, err := agent.NewAgent(c)
    if err != nil {
        log.Fatal(err)
        log.Fatal("E! " + err.Error())
    }

    if *fDebug {
        ag.Config.Agent.Debug = true
    }

    if *fQuiet {
        ag.Config.Agent.Quiet = true
    }
    // Setup logging
    logger.SetupLogging(
        ag.Config.Agent.Debug || *fDebug,
        ag.Config.Agent.Quiet || *fQuiet,
        ag.Config.Agent.Logfile,
    )

    if *fTest {
        err = ag.Test()
        if err != nil {
            log.Fatal(err)
            log.Fatal("E! " + err.Error())
        }
        return
    }

    err = ag.Connect()
    if err != nil {
        log.Fatal(err)
        log.Fatal("E! " + err.Error())
    }

    shutdown := make(chan struct{})
@@ -244,7 +255,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
        close(shutdown)
    }
    if sig == syscall.SIGHUP {
        log.Printf("Reloading Telegraf config\n")
        log.Printf("I! Reloading Telegraf config\n")
        <-reload
        reload <- true
        close(shutdown)
@@ -254,15 +265,15 @@ func reloadLoop(stop chan struct{}, s service.Service) {
    }
}()

log.Printf("Starting Telegraf (version %s)\n", version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())
log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())

if *fPidfile != "" {
    f, err := os.Create(*fPidfile)
    if err != nil {
        log.Fatalf("Unable to create pidfile: %s", err)
        log.Fatalf("E! Unable to create pidfile: %s", err)
    }

    fmt.Fprintf(f, "%d\n", os.Getpid())
@@ -294,8 +305,10 @@ func (p *program) Stop(s service.Service) error {
}

func main() {
    flag.Usage = func() { usageExit(0) }
    flag.Parse()
    if runtime.GOOS == "windows" {
        svcConfig = &service.Config{
        svcConfig := &service.Config{
            Name:        "telegraf",
            DisplayName: "Telegraf Data Collector Service",
            Description: "Collects data using a series of plugins and publishes it to" +
@@ -306,15 +319,23 @@ func main() {
        prg := &program{}
        s, err := service.New(prg, svcConfig)
        if err != nil {
            log.Fatal(err)
            log.Fatal("E! " + err.Error())
        }
        logger, err = s.Logger(nil)
        if err != nil {
            log.Fatal(err)
        }
        err = s.Run()
        if err != nil {
            logger.Error(err)
        // Handle the -service flag here to prevent any issues with tooling that
        // may not have an interactive session, e.g. installing from Ansible.
        if *fService != "" {
            if *fConfig != "" {
                (*svcConfig).Arguments = []string{"-config", *fConfig}
            }
            err := service.Control(s, *fService)
            if err != nil {
                log.Fatal("E! " + err.Error())
            }
        } else {
            err = s.Run()
            if err != nil {
                log.Println("E! " + err.Error())
            }
        }
    } else {
        stop = make(chan struct{})

docs/CONFIGURATION.md
@@ -1,38 +1,38 @@
# Telegraf Configuration

## Generating a Configuration File

A default Telegraf config file can be generated using the -sample-config flag:

```
telegraf -sample-config > telegraf.conf
```

To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags:

```
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
```

You can see the latest config file with all available plugins here:
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)

## Generating a Configuration File

A default Telegraf config file can be auto-generated by telegraf:

```
telegraf config > telegraf.conf
```

To generate a file with specific inputs and outputs, you can use the
--input-filter and --output-filter flags:

```
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
```

## Environment Variables

Environment variables can be used anywhere in the config file; simply prepend
them with $. For strings, the variable must be within quotes (ie, "$STR_VAR");
for numbers and booleans it should be plain (ie, $INT_VAR, $BOOL_VAR).
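
For example, assuming `AGENT_INTERVAL`, `BATCH_SIZE`, and `ROUND_IT` are exported in telegraf's environment (hypothetical names used for illustration):

```toml
[agent]
  interval = "$AGENT_INTERVAL"     # string value: keep the quotes
  metric_batch_size = $BATCH_SIZE  # number: no quotes
  round_interval = $ROUND_IT       # boolean: no quotes
```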

## `[global_tags]` Configuration
# Global Tags

Global tags can be specified in the `[global_tags]` section of the config file
in key="value" format. All metrics being gathered on this host will be tagged
with the tags specified here.

## `[agent]` Configuration
## Agent Configuration

Telegraf has a few options you can configure under the `agent` section of the
Telegraf has a few options you can configure under the `[agent]` section of the
config.

* **interval**: Default data collection interval for all inputs
@@ -56,13 +56,63 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
* **omit_hostname**: If true, do not set the "host" tag in the telegraf agent.

## Input Configuration

The following config parameters are available for all inputs:

* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.

## Output Configuration

There are no generic configuration options available for all outputs.

## Aggregator Configuration

The following config parameters are available for all aggregators:

* **period**: The period on which to flush & clear each aggregator. All metrics
that are sent with timestamps outside of this period will be ignored by the
aggregator.
* **delay**: The delay before each aggregator is flushed. This controls how
long aggregators wait for metrics from input plugins in the case that
aggregators are flushing and inputs are gathering on the same interval
(see the sketch after this list).
* **drop_original**: If true, the original metric will be dropped by the
aggregator and will not get sent to the output plugins.
* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
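
A minimal sketch of these general parameters applied to the `minmax` aggregator shipped in this release (values are illustrative):

```toml
[[aggregators.minmax]]
  period = "30s"    # flush & clear the aggregation window every 30s
  delay = "100ms"   # wait briefly so inputs gathering on the same
                    # interval can deliver their metrics first
  drop_original = false
```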

## Processor Configuration

The following config parameters are available for all processors:

* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
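
For example, to pin the `printer` processor (the processor plugin shipped in this release) to a fixed position in the chain:

```toml
[[processors.printer]]
  order = 1
```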

#### Measurement Filtering

Filters can be configured per input or output, see below for examples.
Filters can be configured per input, output, processor, or aggregator,
see below for examples.

* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
@@ -90,19 +140,6 @@ the tag keys in the final measurement.
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.

## Input Configuration

Some configuration options are configurable per input:

* **name_override**: Override the base name of the measurement.
(Default is the name of the input).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular input should be run less or more often,
you can configure that here.

#### Input Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
@@ -254,11 +291,7 @@ to avoid measurement collisions:
  fielddrop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.
#### Output Configuration Examples:

```toml
[[outputs.influxdb]]
@@ -283,3 +316,39 @@ found by running `telegraf -sample-config`.
  [outputs.influxdb.tagpass]
    cpu = ["cpu0"]
```

#### Aggregator Configuration Examples:

This will collect and emit the min/max of the system load1 metric every
30s, dropping the originals.

```toml
[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s"        # send & clear the aggregate every 30s.
  drop_original = true  # drop the original metrics.

[[outputs.file]]
  files = ["stdout"]
```

This will collect and emit the min/max of the swap metrics every
30s, dropping the originals. The aggregator will not be applied
to the system load metrics due to the `namepass` parameter.

```toml
[[inputs.swap]]

[[inputs.system]]
  fieldpass = ["load1"] # collects system load1 metric.

[[aggregators.minmax]]
  period = "30s"        # send & clear the aggregate every 30s.
  drop_original = true  # drop the original metrics.
  namepass = ["swap"]   # only "pass" swap metrics through the aggregator.

[[outputs.file]]
  files = ["stdout"]
```

@@ -232,6 +232,16 @@ us.west.cpu.load 100
=> cpu.load,region=us.west value=100
```

Multiple templates can also be specified, but these should be differentiated
using _filters_ (see below for more details)

```toml
templates = [
  "*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
  "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
]
```

#### Field Templates:

The field keyword tells Telegraf to give the metric that field name.

etc/telegraf.conf
@@ -30,12 +30,15 @@
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.
@@ -57,10 +60,15 @@
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Run telegraf in debug mode

## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""

## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
@@ -152,7 +160,7 @@
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -170,7 +178,7 @@
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = 'InfluxData/Telegraf'
# namespace = "InfluxData/Telegraf"


# # Configuration for DataDog API to send metrics to.
@@ -357,6 +365,30 @@
# data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
@@ -376,13 +408,18 @@
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## Telnet Mode ##
# ## DNS name of the OpenTSDB server in telnet mode
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
# ## telnet API. "http://opentsdb.example.com" will use the Http API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server in telnet mode
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in Http requests.
# ## Not used with telnet API.
# httpBatchSize = 50
#
# ## Debug true - Prints OpenTSDB communication
# debug = false

@@ -404,6 +441,30 @@


###############################################################################
# PROCESSOR PLUGINS #
###############################################################################

# # Print all metrics that pass through this filter.
# [[processors.printer]]


###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################

# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false


###############################################################################
# INPUT PLUGINS #
###############################################################################

@@ -414,8 +475,8 @@
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## Comment this line if you want the raw CPU time metrics
fielddrop = ["time_*"]
## If true, collect raw CPU time metrics.
collect_cpu_time = false


# Read metrics about disk usage by mount point
@@ -530,14 +591,7 @@
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as, ceph will search for the corresponding keyring
# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the
# ## client section of ceph.conf for example:
# ##
# ## [client.telegraf]
# ## keyring = /etc/ceph/client.telegraf.keyring
# ##
# ## Consult the ceph documentation for more detail on keyring generation.
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
@@ -546,28 +600,30 @@
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
# ## to be specified
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = true


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
# ## Directories in which to look for files, globs are supported.
# ## Consider restricting paths to the set of cgroups you really
# ## want to monitor if you have a large number of cgroups, to avoid
# ## any cardinality issues.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = 'us-east-1'
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
@@ -585,32 +641,37 @@
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = '1m'
# period = "5m"
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = '1m'
# delay = "5m"
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = '1m'
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = '10m'
# #cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = 'AWS/ELB'
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
# ## maximum of 10. Optional - default value is 10.
# ratelimit = 10
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ['Latency', 'RequestCount']
# # names = ["Latency", "RequestCount"]
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = 'LoadBalancerName'
# # value = 'p-example'
# # name = "LoadBalancerName"
# # value = "p-example"


# # Gather health check statuses from services registered in Consul
@@ -718,6 +779,9 @@
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the elastic search server(s)
# http_timeout = "5s"
#
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
@@ -813,12 +877,15 @@
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## ie 10.10.3.33:1936/haproxy?stats
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
# #
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
# ## Or you can also use local socket
# ## servers = ["socket:/run/haproxy/admin.sock"]
# ##
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid.
# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]


# # HTTP/HTTPS request given an address a method and a timeout
@@ -860,6 +927,8 @@
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
@@ -899,6 +968,9 @@
# urls = [
# "http://localhost:8086/debug/vars"
# ]
#
# ## http request & header timeout
# timeout = "5s"


# # Read metrics from one or many bare metal servers
@@ -910,22 +982,11 @@
# ##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]

# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
# ## iptables require root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
# ## Users must configure sudo to allow telegraf user to run iptables.
# ## iptables can be restricted to only use list command "iptables -nvL"
# use_sudo = false
# ## define the table to monitor:
# table = "filter"
# ## Defines the chains to monitor:
# chains = [ "INPUT" ]


# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia"
#
# ## This specifies the mode used
@@ -969,6 +1030,22 @@
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"


# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://1.1.1.1:10255"
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
@@ -1041,8 +1118,6 @@
# # "tasks",
# # "messages",
# # ]
# ## Include mesos tasks statistics, default is false
# # slave_tasks = true


# # Read metrics from one or many MongoDB servers
@@ -1090,13 +1165,13 @@
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
@@ -1186,23 +1261,6 @@
# command = "passenger-status -v --show=xml"


# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost:port[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest port=6432 password=... sslmode=... dbname=pgbouncer
# ##
# ## All connection parameters are optional, except for dbname,
# ## you need to set it always as pgbouncer.
# address = "host=localhost user=postgres port=6432 sslmode=disable dbname=pgbouncer"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# # databases = ["app_production", "testing"]


# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
@@ -1235,13 +1293,13 @@
|
||||
# ## urls to ping
|
||||
# urls = ["www.google.com"] # required
|
||||
# ## number of pings to send per collection (ping -c <COUNT>)
|
||||
# count = 1 # required
|
||||
# # count = 1
|
||||
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
|
||||
# ping_interval = 0.0
|
||||
# # ping_interval = 1.0
|
||||
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
|
||||
# timeout = 1.0
|
||||
# # timeout = 1.0
|
||||
# ## interface to send ping from (ping -I <INTERFACE>)
|
||||
# interface = ""
|
||||
# # interface = ""
|
||||
|
||||
|
||||
# # Read metrics from one or many postgresql servers
|
||||
@@ -1261,8 +1319,12 @@
|
||||
# ##
|
||||
# address = "host=localhost user=postgres sslmode=disable"
|
||||
#
|
||||
# ## A list of databases to explicitly ignore. If not specified, metrics for all
|
||||
# ## databases are gathered. Do NOT use with the 'databases' option.
|
||||
# # ignored_databases = ["postgres", "template0", "template1"]
|
||||
#
|
||||
# ## A list of databases to pull metrics about. If not specified, metrics for all
|
||||
# ## databases are gathered.
|
||||
# ## databases are gathered. Do NOT use with the 'ignore_databases' option.
|
||||
# # databases = ["app_production", "testing"]
|
||||
|
||||
|
||||
@@ -1429,6 +1491,65 @@
|
||||
# servers = ["http://localhost:8098"]
|
||||
|
||||
|
||||
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# ## Timeout for each SNMP query.
# timeout = "5s"
# ## Number of retries to attempt within timeout.
# retries = 3
# ## SNMP version, values can be 1, 2, or 3
# version = 2
#
# ## SNMP community string.
# community = "public"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# ## measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# ## measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# ## auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"


# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names

@@ -1601,6 +1722,25 @@
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
# read_timeout = "10s"
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
# max_body_size = 0
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0

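A quick, hedged way to exercise this listener: assuming Telegraf is running it on :8186, InfluxDB line protocol can be POSTed to its /write endpoint. The metric below is illustrative.

package main

import (
    "fmt"
    "log"
    "net/http"
    "strings"
)

func main() {
    // One line of InfluxDB line protocol; measurement, tag, and value are made up.
    body := strings.NewReader("cpu,host=test usage_idle=90 1470000000000000000")
    resp, err := http.Post("http://localhost:8186/write", "text/plain", body)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status) // a 204 No Content indicates the write was accepted
}
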
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume

@@ -1693,13 +1833,18 @@
# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# servers = ["nats://localhost:4222"]
# # servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# secure = false
# # secure = false
# ## subject(s) to consume
# subjects = ["telegraf"]
# # subjects = ["telegraf"]
# ## name a queue group
# queue_group = "telegraf_consumers"
# # queue_group = "telegraf_consumers"
#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read

@@ -1786,14 +1931,14 @@
# # Generic TCP listener
# [[inputs.tcp_listener]]
# ## Address and port to host TCP listener on
# service_address = ":8094"
# # service_address = ":8094"
#
# ## Number of TCP messages allowed to queue up. Once filled, the
# ## TCP listener will start dropping packets.
# allowed_pending_messages = 10000
# # allowed_pending_messages = 10000
#
# ## Maximum number of concurrent TCP connections to allow
# max_tcp_connections = 250
# # max_tcp_connections = 250
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read

@@ -1805,11 +1950,11 @@
# # Generic UDP listener
# [[inputs.udp_listener]]
# ## Address and port to host UDP listener on
# service_address = ":8092"
# # service_address = ":8092"
#
# ## Number of UDP messages allowed to queue up. Once filled, the
# ## UDP listener will start dropping packets.
# allowed_pending_messages = 10000
# # allowed_pending_messages = 10000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read

@@ -1823,6 +1968,9 @@
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
#

@@ -1831,4 +1979,3 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"

@@ -42,10 +42,14 @@
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"

## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "/Program Files/Telegraf/telegraf.log"

## Override default hostname, if empty use os.Hostname()
hostname = ""

@@ -85,7 +89,7 @@
# Windows Performance Counters plugin.
# This is the recommended method of monitoring system metrics on Windows,
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
# which utilizes a lot of system resources.
# which utilizes more system resources.
#
# See more configuration examples at:
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
@@ -95,70 +99,104 @@
    # Processor usage, alternative to native, reports on a per core.
    ObjectName = "Processor"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
    Counters = [
      "% Idle Time",
      "% Interrupt Time",
      "% Privileged Time",
      "% User Time",
      "% Processor Time",
    ]
    Measurement = "win_cpu"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Disk times and queues
    ObjectName = "LogicalDisk"
    Instances = ["*"]
    Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
    Counters = [
      "% Idle Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
      "% User Time",
      "Current Disk Queue Length",
    ]
    Measurement = "win_disk"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    ObjectName = "System"
    Counters = ["Context Switches/sec","System Calls/sec"]
    Counters = [
      "Context Switches/sec",
      "System Calls/sec",
    ]
    Instances = ["------"]
    Measurement = "win_system"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

  [[inputs.win_perf_counters.object]]
    # Example query where the Instance portion must be removed to get data back, such as from the Memory object.
    # Example query where the Instance portion must be removed to get data back,
    # such as from the Memory object.
    ObjectName = "Memory"
    Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
    Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
    Counters = [
      "Available Bytes",
      "Cache Faults/sec",
      "Demand Zero Faults/sec",
      "Page Faults/sec",
      "Pages/sec",
      "Transition Faults/sec",
      "Pool Nonpaged Bytes",
      "Pool Paged Bytes",
    ]
    # Use 6 x - to remove the Instance bit from the query.
    Instances = ["------"]
    Measurement = "win_mem"
    #IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
    # Set to true to include _Total instance when querying for all (*).
    #IncludeTotal=false

# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)

# Read metrics about cpu usage
#[[inputs.cpu]]
## Whether to report per-cpu stats or not
#percpu = true
## Whether to report total system cpu stats or not
#totalcpu = true
## Comment this line if you want the raw CPU time metrics
#fielddrop = ["time_*"]
# # Read metrics about cpu usage
# [[inputs.cpu]]
# ## Whether to report per-cpu stats or not
# percpu = true
# ## Whether to report total system cpu stats or not
# totalcpu = true
# ## Comment this line if you want the raw CPU time metrics
# fielddrop = ["time_*"]

# Read metrics about disk usage by mount point
#[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
## mount_points=["/"]

## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
#ignore_fs = ["tmpfs", "devtmpfs"]
# # Read metrics about disk usage by mount point
# [[inputs.disk]]
# ## By default, telegraf gathers stats for all mountpoints.
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
# ## mount_points=["/"]
#
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
# ## present on /run, /var/run, /dev/shm or /dev).
# # ignore_fs = ["tmpfs", "devtmpfs"]

# Read metrics about disk IO by device
#[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
## devices = ["sda", "sdb"]
## Uncomment the following line if you do not need disk serial numbers.
## skip_serial_number = true

# Read metrics about memory usage
#[[inputs.mem]]
# no configuration
# # Read metrics about disk IO by device
# [[inputs.diskio]]
# ## By default, telegraf will gather stats for all devices including
# ## disk partitions.
# ## Setting devices will restrict the stats to the specified devices.
# ## devices = ["sda", "sdb"]
# ## Uncomment the following line if you do not need disk serial numbers.
# ## skip_serial_number = true

# Read metrics about swap memory usage
#[[inputs.swap]]
# no configuration

# # Read metrics about memory usage
# [[inputs.mem]]
# # no configuration


# # Read metrics about swap memory usage
# [[inputs.swap]]
# # no configuration

@@ -1,6 +1,8 @@
package buffer

import (
    "sync"

    "github.com/influxdata/telegraf"
)

@@ -11,6 +13,8 @@ type Buffer struct {
    drops int
    // total metrics added
    total int

    mu sync.Mutex
}

// NewBuffer returns a Buffer

@@ -61,11 +65,13 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
// the batch will be of maximum length batchSize. It can be less than batchSize,
// if the length of Buffer is less than batchSize.
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
    b.mu.Lock()
    n := min(len(b.buf), batchSize)
    out := make([]telegraf.Metric, n)
    for i := 0; i < n; i++ {
        out[i] = <-b.buf
    }
    b.mu.Unlock()
    return out
}

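A minimal usage sketch of the Buffer above. NewBuffer's capacity argument is an assumption (its body is elided in this hunk), and the metric values are illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal/buffer"
)

func main() {
    // Assumed capacity argument; oldest metrics drop when the buffer fills.
    b := buffer.NewBuffer(10000)

    m, _ := telegraf.NewMetric(
        "cpu",
        map[string]string{"host": "test"},
        map[string]interface{}{"value": 42},
        time.Now(),
    )
    b.Add(m, m, m)

    batch := b.Batch(2) // dequeues at most 2 metrics, FIFO order
    fmt.Printf("batch of %d metrics\n", len(batch)) // batch of 2 metrics
}
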
@@ -11,15 +11,18 @@ import (
    "regexp"
    "runtime"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/internal/models"
    "github.com/influxdata/telegraf/plugins/aggregators"
    "github.com/influxdata/telegraf/plugins/inputs"
    "github.com/influxdata/telegraf/plugins/outputs"
    "github.com/influxdata/telegraf/plugins/parsers"
    "github.com/influxdata/telegraf/plugins/processors"
    "github.com/influxdata/telegraf/plugins/serializers"

    "github.com/influxdata/config"

@@ -47,9 +50,12 @@ type Config struct {
    InputFilters  []string
    OutputFilters []string

    Agent   *AgentConfig
    Inputs  []*models.RunningInput
    Outputs []*models.RunningOutput
    Agent       *AgentConfig
    Inputs      []*models.RunningInput
    Outputs     []*models.RunningOutput
    Aggregators []*models.RunningAggregator
    // Processors have a slice wrapper type because they need to be sorted
    Processors models.RunningProcessors
}

func NewConfig() *Config {

@@ -64,6 +70,7 @@ func NewConfig() *Config {
        Tags:          make(map[string]string),
        Inputs:        make([]*models.RunningInput, 0),
        Outputs:       make([]*models.RunningOutput, 0),
        Processors:    make([]*models.RunningProcessor, 0),
        InputFilters:  make([]string, 0),
        OutputFilters: make([]string, 0),
    }

@@ -125,6 +132,9 @@ type AgentConfig struct {
    // Debug is the option for running in debug mode
    Debug bool

    // Logfile specifies the file to send logs to
    Logfile string

    // Quiet is the option for running in quiet mode
    Quiet    bool
    Hostname string

@@ -135,7 +145,7 @@ type AgentConfig struct {
func (c *Config) InputNames() []string {
    var name []string
    for _, input := range c.Inputs {
        name = append(name, input.Name)
        name = append(name, input.Name())
    }
    return name
}
@@ -195,12 +205,15 @@ var header = `# Telegraf Configuration
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true

## Telegraf will send metrics to outputs in batches of at
## most metric_batch_size metrics.
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000

## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000

## Collection jitter is used to jitter the collection by a random amount.

@@ -222,10 +235,15 @@ var header = `# Telegraf Configuration
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Run telegraf in debug mode

## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""

## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.

@@ -237,6 +255,20 @@ var header = `# Telegraf Configuration
###############################################################################
`
var processorHeader = `

###############################################################################
#                             PROCESSOR PLUGINS                               #
###############################################################################
`

var aggregatorHeader = `

###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################
`

var inputHeader = `

###############################################################################

@@ -252,9 +284,15 @@ var serviceInputHeader = `
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(inputFilters []string, outputFilters []string) {
func PrintSampleConfig(
    inputFilters []string,
    outputFilters []string,
    aggregatorFilters []string,
    processorFilters []string,
) {
    fmt.Printf(header)

    // print output plugins
    if len(outputFilters) != 0 {
        printFilteredOutputs(outputFilters, false)
    } else {

@@ -270,6 +308,33 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) {
        printFilteredOutputs(pnames, true)
    }

    // print processor plugins
    fmt.Printf(processorHeader)
    if len(processorFilters) != 0 {
        printFilteredProcessors(processorFilters, false)
    } else {
        pnames := []string{}
        for pname := range processors.Processors {
            pnames = append(pnames, pname)
        }
        sort.Strings(pnames)
        printFilteredProcessors(pnames, true)
    }

    // print aggregator plugins
    fmt.Printf(aggregatorHeader)
    if len(aggregatorFilters) != 0 {
        printFilteredAggregators(aggregatorFilters, false)
    } else {
        pnames := []string{}
        for pname := range aggregators.Aggregators {
            pnames = append(pnames, pname)
        }
        sort.Strings(pnames)
        printFilteredAggregators(pnames, true)
    }

    // print input plugins
    fmt.Printf(inputHeader)
    if len(inputFilters) != 0 {
        printFilteredInputs(inputFilters, false)

@@ -287,6 +352,42 @@ func PrintSampleConfig(inputFilters []string, outputFilters []string) {
    }
}

func printFilteredProcessors(processorFilters []string, commented bool) {
    // Filter processors
    var pnames []string
    for pname := range processors.Processors {
        if sliceContains(pname, processorFilters) {
            pnames = append(pnames, pname)
        }
    }
    sort.Strings(pnames)

    // Print processors
    for _, pname := range pnames {
        creator := processors.Processors[pname]
        output := creator()
        printConfig(pname, output, "processors", commented)
    }
}

func printFilteredAggregators(aggregatorFilters []string, commented bool) {
    // Filter aggregators
    var anames []string
    for aname := range aggregators.Aggregators {
        if sliceContains(aname, aggregatorFilters) {
            anames = append(anames, aname)
        }
    }
    sort.Strings(anames)

    // Print aggregators
    for _, aname := range anames {
        creator := aggregators.Aggregators[aname]
        output := creator()
        printConfig(aname, output, "aggregators", commented)
    }
}

func printFilteredInputs(inputFilters []string, commented bool) {
    // Filter inputs
    var pnames []string
@@ -404,24 +505,21 @@ func PrintOutputConfig(name string) error {
}

func (c *Config) LoadDirectory(path string) error {
    directoryEntries, err := ioutil.ReadDir(path)
    if err != nil {
        return err
    }
    for _, entry := range directoryEntries {
        if entry.IsDir() {
            continue
    walkfn := func(thispath string, info os.FileInfo, _ error) error {
        if info.IsDir() {
            return nil
        }
        name := entry.Name()
        name := info.Name()
        if len(name) < 6 || name[len(name)-5:] != ".conf" {
            continue
            return nil
        }
        err := c.LoadConfig(filepath.Join(path, name))
        err := c.LoadConfig(thispath)
        if err != nil {
            return err
        }
        return nil
    }
    return nil
    return filepath.Walk(path, walkfn)
}

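The switch from ioutil.ReadDir to filepath.Walk means LoadDirectory now descends into subdirectories instead of skipping them; every regular file ending in .conf is loaded. A sketch of the effect (the paths are illustrative):

package main

import (
    "log"

    "github.com/influxdata/telegraf/internal/config"
)

func main() {
    c := config.NewConfig()
    // Previously only telegraf.d/*.conf was read; nested fragments such as
    // telegraf.d/inputs/cpu.conf and telegraf.d/outputs/influxdb.conf now load too.
    if err := c.LoadDirectory("/etc/telegraf/telegraf.d"); err != nil {
        log.Fatal(err)
    }
}
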
// Try to find a default config file at these locations (in order):

@@ -438,7 +536,7 @@ func getDefaultConfigPath() (string, error) {
    }
    for _, path := range []string{envfile, homefile, etcfile} {
        if _, err := os.Stat(path); err == nil {
            log.Printf("Using config file: %s", path)
            log.Printf("I! Using config file: %s", path)
            return path, nil
        }
    }

@@ -469,7 +567,7 @@ func (c *Config) LoadConfig(path string) error {
            return fmt.Errorf("%s: invalid configuration", path)
        }
        if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
            log.Printf("Could not parse [global_tags] config\n")
            log.Printf("E! Could not parse [global_tags] config\n")
            return fmt.Errorf("Error parsing %s, %s", path, err)
        }
    }

@@ -482,7 +580,7 @@ func (c *Config) LoadConfig(path string) error {
            return fmt.Errorf("%s: invalid configuration", path)
        }
        if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
            log.Printf("Could not parse [agent] config\n")
            log.Printf("E! Could not parse [agent] config\n")
            return fmt.Errorf("Error parsing %s, %s", path, err)
        }
    }

@@ -499,6 +597,7 @@ func (c *Config) LoadConfig(path string) error {
        case "outputs":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                // legacy [outputs.influxdb] support
                case *ast.Table:
                    if err = c.addOutput(pluginName, pluginSubTable); err != nil {
                        return fmt.Errorf("Error parsing %s, %s", path, err)

@@ -517,6 +616,7 @@ func (c *Config) LoadConfig(path string) error {
        case "inputs", "plugins":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                // legacy [inputs.cpu] support
                case *ast.Table:
                    if err = c.addInput(pluginName, pluginSubTable); err != nil {
                        return fmt.Errorf("Error parsing %s, %s", path, err)

@@ -532,6 +632,34 @@ func (c *Config) LoadConfig(path string) error {
                        pluginName, path)
                }
            }
        case "processors":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addProcessor(pluginName, t); err != nil {
                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s, file %s",
                        pluginName, path)
                }
            }
        case "aggregators":
            for pluginName, pluginVal := range subTable.Fields {
                switch pluginSubTable := pluginVal.(type) {
                case []*ast.Table:
                    for _, t := range pluginSubTable {
                        if err = c.addAggregator(pluginName, t); err != nil {
                            return fmt.Errorf("Error parsing %s, %s", path, err)
                        }
                    }
                default:
                    return fmt.Errorf("Unsupported config format: %s, file %s",
                        pluginName, path)
                }
            }
        // Assume it's an input plugin for legacy config file support if no
        // other identifiers are present
        default:

@@ -540,6 +668,10 @@ func (c *Config) LoadConfig(path string) error {
            }
        }
    }
    if len(c.Processors) > 1 {
        sort.Sort(c.Processors)
    }
    return nil
}

@@ -572,6 +704,52 @@ func parseFile(fpath string) (*ast.Table, error) {
    return toml.Parse(contents)
}

func (c *Config) addAggregator(name string, table *ast.Table) error {
    creator, ok := aggregators.Aggregators[name]
    if !ok {
        return fmt.Errorf("Undefined but requested aggregator: %s", name)
    }
    aggregator := creator()

    conf, err := buildAggregator(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, aggregator); err != nil {
        return err
    }

    c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
    return nil
}

func (c *Config) addProcessor(name string, table *ast.Table) error {
    creator, ok := processors.Processors[name]
    if !ok {
        return fmt.Errorf("Undefined but requested processor: %s", name)
    }
    processor := creator()

    processorConfig, err := buildProcessor(name, table)
    if err != nil {
        return err
    }

    if err := config.UnmarshalTable(table, processor); err != nil {
        return err
    }

    rf := &models.RunningProcessor{
        Name:      name,
        Processor: processor,
        Config:    processorConfig,
    }

    c.Processors = append(c.Processors, rf)
    return nil
}

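A hedged sketch of how these two registration paths get exercised end to end: a config file containing processor and aggregator tables flows through LoadConfig into addProcessor and addAggregator, populating c.Processors and c.Aggregators. The config path and its contents are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/influxdata/telegraf/internal/config"
)

func main() {
    c := config.NewConfig()
    // The file is assumed to contain [[processors.*]] and [[aggregators.*]]
    // tables for plugins that are actually registered.
    if err := c.LoadConfig("/etc/telegraf/telegraf.conf"); err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(c.Processors), len(c.Aggregators))
}
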
func (c *Config) addOutput(name string, table *ast.Table) error {
    if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
        return nil

@@ -644,7 +822,6 @@ func (c *Config) addInput(name string, table *ast.Table) error {
    }

    rp := &models.RunningInput{
        Name:   name,
        Input:  input,
        Config: pluginConfig,
    }

@@ -652,6 +829,144 @@ func (c *Config) addInput(name string, table *ast.Table) error {
    return nil
}

// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
    unsupportedFields := []string{"tagexclude", "taginclude"}
    for _, field := range unsupportedFields {
        if _, ok := tbl.Fields[field]; ok {
            return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).",
                field, name)
        }
    }

    conf := &models.AggregatorConfig{
        Name:   name,
        Delay:  time.Millisecond * 100,
        Period: time.Second * 30,
    }

    if node, ok := tbl.Fields["period"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                dur, err := time.ParseDuration(str.Value)
                if err != nil {
                    return nil, err
                }

                conf.Period = dur
            }
        }
    }

    if node, ok := tbl.Fields["delay"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                dur, err := time.ParseDuration(str.Value)
                if err != nil {
                    return nil, err
                }

                conf.Delay = dur
            }
        }
    }

    if node, ok := tbl.Fields["drop_original"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if b, ok := kv.Value.(*ast.Boolean); ok {
                var err error
                conf.DropOriginal, err = strconv.ParseBool(b.Value)
                if err != nil {
                    log.Printf("Error parsing boolean value for %s: %s\n", name, err)
                }
            }
        }
    }

    if node, ok := tbl.Fields["name_prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                conf.MeasurementPrefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_suffix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                conf.MeasurementSuffix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_override"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                conf.NameOverride = str.Value
            }
        }
    }

    conf.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
                log.Printf("Could not parse tags for aggregator %s\n", name)
            }
        }
    }

    delete(tbl.Fields, "period")
    delete(tbl.Fields, "delay")
    delete(tbl.Fields, "drop_original")
    delete(tbl.Fields, "name_prefix")
    delete(tbl.Fields, "name_suffix")
    delete(tbl.Fields, "name_override")
    delete(tbl.Fields, "tags")
    var err error
    conf.Filter, err = buildFilter(tbl)
    if err != nil {
        return conf, err
    }
    return conf, nil
}

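To make the options buildAggregator parses concrete, here is a hedged sketch of the TOML it consumes. The plugin name "minmax", the values, and the toml import path are assumptions; the code above only shows toml.Parse being used via parseFile.

package main

import (
    "fmt"
    "log"

    "github.com/influxdata/toml" // import path assumed
)

func main() {
    tbl, err := toml.Parse([]byte(`
[[aggregators.minmax]]
  period = "30s"        # aggregation window (defaults to 30s above)
  delay = "150ms"       # extra wait for late metrics (defaults to 100ms)
  drop_original = true  # suppress the raw metrics
  name_suffix = "_agg"
  [aggregators.minmax.tags]
    role = "aggregated"
`))
    if err != nil {
        log.Fatal(err)
    }
    // buildAggregator receives the [[aggregators.minmax]] sub-table of this tree.
    fmt.Println(len(tbl.Fields)) // 1
}
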
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
    conf := &models.ProcessorConfig{Name: name}
    unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"}
    for _, field := range unsupportedFields {
        if _, ok := tbl.Fields[field]; ok {
            return nil, fmt.Errorf("%s is not supported for processor plugins (%s).",
                field, name)
        }
    }

    if node, ok := tbl.Fields["order"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if b, ok := kv.Value.(*ast.Integer); ok {
                var err error
                conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
                if err != nil {
                    log.Printf("Error parsing int value for %s: %s\n", name, err)
                }
            }
        }
    }

    delete(tbl.Fields, "order")
    var err error
    conf.Filter, err = buildFilter(tbl)
    if err != nil {
        return conf, err
    }
    return conf, nil
}

// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig

@@ -835,7 +1150,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("Could not parse tags for input %s\n", name)
                log.Printf("E! Could not parse tags for input %s\n", name)
            }
        }
    }

@@ -12,21 +12,23 @@ import (
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))

type GlobPath struct {
    path    string
    hasMeta bool
    g       glob.Glob
    root    string
    path         string
    hasMeta      bool
    hasSuperMeta bool
    g            glob.Glob
    root         string
}

func Compile(path string) (*GlobPath, error) {
    out := GlobPath{
        hasMeta: hasMeta(path),
        path:    path,
        hasMeta:      hasMeta(path),
        hasSuperMeta: hasSuperMeta(path),
        path:         path,
    }

    // if there are no glob meta characters in the path, don't bother compiling
    // a glob object or finding the root directory. (see short-circuit in Match)
    if !out.hasMeta {
    if !out.hasMeta || !out.hasSuperMeta {
        return &out, nil
    }

@@ -48,6 +50,17 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
        }
        return out
    }
    if !g.hasSuperMeta {
        out := make(map[string]os.FileInfo)
        files, _ := filepath.Glob(g.path)
        for _, file := range files {
            info, err := os.Stat(file)
            if !os.IsNotExist(err) {
                out[file] = info
            }
        }
        return out
    }
    return walkFilePath(g.root, g.g)
}

@@ -96,3 +109,8 @@ func findRootDir(path string) string {
func hasMeta(path string) bool {
    return strings.IndexAny(path, "*?[") >= 0
}

// hasSuperMeta reports whether path contains any super magic glob characters (**).
func hasSuperMeta(path string) bool {
    return strings.Index(path, "**") >= 0
}

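A hedged sketch of the three path classes the code above distinguishes; the patterns are illustrative, and Match's behavior for each class follows the short-circuits shown.

package main

import (
    "fmt"
    "log"

    "github.com/influxdata/telegraf/internal/globpath"
)

func main() {
    for _, pattern := range []string{
        "/var/log/telegraf.log", // no meta characters: the path is stat'd directly
        "/var/log/*.log",        // plain meta: now served by filepath.Glob
        "/var/log/**.log",       // super meta (**): full walk from the root dir
    } {
        g, err := globpath.Compile(pattern)
        if err != nil {
            log.Fatal(err)
        }
        for file := range g.Match() {
            fmt.Println(pattern, "=>", file)
        }
    }
}
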
@@ -35,12 +35,21 @@ type Duration struct {
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
    var err error
    // Parse string duration, ie, "1s"
    d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))

    // see if we can straight convert it
    d.Duration, err = time.ParseDuration(string(b))
    if err == nil {
        return nil
    }

    // Parse string duration, ie, "1s"
    if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
        d.Duration, err = time.ParseDuration(uq)
        if err == nil {
            return nil
        }
    }

    // First try parsing as integer seconds
    sI, err := strconv.ParseInt(string(b), 10, 64)
    if err == nil {

@@ -198,7 +207,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
        return err
    case <-timer.C:
        if err := c.Process.Kill(); err != nil {
            log.Printf("FATAL error killing process: %s", err)
            log.Printf("E! FATAL error killing process: %s", err)
            return err
        }
        // wait for the command to return after killing it

@@ -131,3 +131,22 @@ func TestRandomSleep(t *testing.T) {
    elapsed = time.Since(s)
    assert.True(t, elapsed < time.Millisecond*150)
}

func TestDuration(t *testing.T) {
    var d Duration

    d.UnmarshalTOML([]byte(`"1s"`))
    assert.Equal(t, time.Second, d.Duration)

    d = Duration{}
    d.UnmarshalTOML([]byte(`1s`))
    assert.Equal(t, time.Second, d.Duration)

    d = Duration{}
    d.UnmarshalTOML([]byte(`10`))
    assert.Equal(t, 10*time.Second, d.Duration)

    d = Duration{}
    d.UnmarshalTOML([]byte(`1.5`))
    assert.Equal(t, time.Second, d.Duration)
}

@@ -96,7 +96,7 @@ func (f *Filter) Compile() error {
// Apply applies the filter to the given measurement name, fields map, and
// tags map. It will return false if the metric should be "filtered out", and
// true if the metric should "pass".
// It will modify tags in-place if they need to be deleted.
// It will modify tags & fields in-place if they need to be deleted.
func (f *Filter) Apply(
    measurement string,
    fields map[string]interface{},

internal/models/makemetric.go (new file, 154 lines)
@@ -0,0 +1,154 @@
package models

import (
    "log"
    "math"
    "time"

    "github.com/influxdata/telegraf"
)

// makemetric is used by both RunningAggregator & RunningInput
// to make metrics.
//   nameOverride: override the name of the measurement being made.
//   namePrefix:   add this prefix to each measurement name.
//   nameSuffix:   add this suffix to each measurement name.
//   pluginTags:   these are tags that are specific to this plugin.
//   daemonTags:   these are daemon-wide global tags, and get applied after pluginTags.
//   filter:       this is a filter to apply to each metric being made.
//   applyFilter:  if false, the above filter is not applied to each metric.
//                 This is used by Aggregators, because aggregators use filters
//                 on incoming metrics instead of on created metrics.
// TODO refactor this to not have such a huge func signature.
func makemetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    nameOverride string,
    namePrefix string,
    nameSuffix string,
    pluginTags map[string]string,
    daemonTags map[string]string,
    filter Filter,
    applyFilter bool,
    debug bool,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    if len(fields) == 0 || len(measurement) == 0 {
        return nil
    }
    if tags == nil {
        tags = make(map[string]string)
    }

    // Override measurement name if set
    if len(nameOverride) != 0 {
        measurement = nameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(namePrefix) != 0 {
        measurement = namePrefix + measurement
    }
    if len(nameSuffix) != 0 {
        measurement = measurement + nameSuffix
    }

    // Apply plugin-wide tags if set
    for k, v := range pluginTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range daemonTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    // Apply the metric filter(s).
    // For aggregators the filter does not get applied when the metric is made;
    // instead, the filter is applied to metrics coming into the plugin,
    // ie, it gets applied in the RunningAggregator.Add function.
    if applyFilter {
        if ok := filter.Apply(measurement, fields, tags); !ok {
            return nil
        }
    }

    for k, v := range fields {
        // Validate uint64 and float64 fields
        // convert all int & uint types to int64
        switch val := v.(type) {
        case nil:
            // delete nil fields
            delete(fields, k)
        case uint:
            fields[k] = int64(val)
            continue
        case uint8:
            fields[k] = int64(val)
            continue
        case uint16:
            fields[k] = int64(val)
            continue
        case uint32:
            fields[k] = int64(val)
            continue
        case int:
            fields[k] = int64(val)
            continue
        case int8:
            fields[k] = int64(val)
            continue
        case int16:
            fields[k] = int64(val)
            continue
        case int32:
            fields[k] = int64(val)
            continue
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                fields[k] = int64(val)
            } else {
                fields[k] = int64(9223372036854775807)
            }
            continue
        case float32:
            fields[k] = float64(val)
            continue
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if debug {
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                delete(fields, k)
                continue
            }
        default:
            fields[k] = v
        }
    }

    var m telegraf.Metric
    var err error
    switch mType {
    case telegraf.Counter:
        m, err = telegraf.NewCounterMetric(measurement, tags, fields, t)
    case telegraf.Gauge:
        m, err = telegraf.NewGaugeMetric(measurement, tags, fields, t)
    default:
        m, err = telegraf.NewMetric(measurement, tags, fields, t)
    }
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return nil
    }

    return m
}
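To make the field normalization above concrete, a small illustration of what makemetric does to each field type (the values are made up):

package main

import (
    "fmt"
    "math"
)

func main() {
    fields := map[string]interface{}{
        "a": uint32(7),       // all int/uint types become int64: int64(7)
        "b": uint64(1) << 63, // >= 2^63 clamps to int64 max (InfluxDB has no uint64)
        "c": math.NaN(),      // dropped: NaN/Inf are invalid in InfluxDB
        "d": nil,             // dropped: nil fields are deleted
    }
    // After makemetric's loop, only "a" and "b" would remain, both as int64.
    fmt.Println(len(fields)) // 4 here; 2 once makemetric processes them
}
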
internal/models/running_aggregator.go (new file, 164 lines)
@@ -0,0 +1,164 @@
package models

import (
    "time"

    "github.com/influxdata/telegraf"
)

type RunningAggregator struct {
    a      telegraf.Aggregator
    Config *AggregatorConfig

    metrics chan telegraf.Metric

    periodStart time.Time
    periodEnd   time.Time
}

func NewRunningAggregator(
    a telegraf.Aggregator,
    conf *AggregatorConfig,
) *RunningAggregator {
    return &RunningAggregator{
        a:       a,
        Config:  conf,
        metrics: make(chan telegraf.Metric, 100),
    }
}

// AggregatorConfig containing configuration parameters for the running
// aggregator plugin.
type AggregatorConfig struct {
    Name string

    DropOriginal      bool
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter

    Period time.Duration
    Delay  time.Duration
}

func (r *RunningAggregator) Name() string {
    return "aggregators." + r.Config.Name
}

func (r *RunningAggregator) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    m := makemetric(
        measurement,
        fields,
        tags,
        r.Config.NameOverride,
        r.Config.MeasurementPrefix,
        r.Config.MeasurementSuffix,
        r.Config.Tags,
        nil,
        r.Config.Filter,
        false,
        false,
        mType,
        t,
    )

    m.SetAggregate(true)

    return m
}

// Add applies the given metric to the aggregator.
// Before applying to the plugin, it will run any defined filters on the metric.
// Add returns true if the original metric should be dropped.
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
    if r.Config.Filter.IsActive() {
        // check if the aggregator should apply this metric
        name := in.Name()
        fields := in.Fields()
        tags := in.Tags()
        t := in.Time()
        if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
            // aggregator should not apply this metric
            return false
        }

        in, _ = telegraf.NewMetric(name, tags, fields, t)
    }

    r.metrics <- in
    return r.Config.DropOriginal
}

func (r *RunningAggregator) add(in telegraf.Metric) {
    r.a.Add(in)
}

func (r *RunningAggregator) push(acc telegraf.Accumulator) {
    r.a.Push(acc)
}

func (r *RunningAggregator) reset() {
    r.a.Reset()
}

// Run runs the running aggregator, listens for incoming metrics, and waits
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
    acc telegraf.Accumulator,
    shutdown chan struct{},
) {
    // The start of the period is truncated to the nearest second.
    //
    // Every metric then gets its timestamp checked and is dropped if it
    // is not within:
    //
    //   start < t < end + truncation + delay
    //
    // So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
    //   now        = 00:00.2
    //   start      = 00:00
    //   truncation = 00:00.2
    //   end        = 00:10
    //   1st interval: 00:00 - 00:10.5
    //   2nd interval: 00:10 - 00:20.5
    //   etc.
    //
    now := time.Now()
    r.periodStart = now.Truncate(time.Second)
    truncation := now.Sub(r.periodStart)
    r.periodEnd = r.periodStart.Add(r.Config.Period)
    time.Sleep(r.Config.Delay)
    periodT := time.NewTicker(r.Config.Period)
    defer periodT.Stop()

    for {
        select {
        case <-shutdown:
            if len(r.metrics) > 0 {
                // wait until metrics are flushed before exiting
                continue
            }
            return
        case m := <-r.metrics:
            if m.Time().Before(r.periodStart) ||
                m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
                // the metric is outside the current aggregation period, so
                // skip it.
                continue
            }
            r.add(m)
        case <-periodT.C:
            r.periodStart = r.periodEnd
            r.periodEnd = r.periodStart.Add(r.Config.Period)
            r.push(acc)
            r.reset()
        }
    }
}
internal/models/running_aggregator_test.go (new file, 256 lines)
@@ -0,0 +1,256 @@
package models

import (
    "fmt"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

func TestAdd(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, make(chan struct{}))

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*150),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if atomic.LoadInt64(&a.sum) > 0 {
            break
        }
    }
    assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    go ra.Run(&acc, make(chan struct{}))

    // metric before current period
    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(-time.Hour),
    )
    assert.False(t, ra.Add(m))

    // metric after current period
    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Hour),
    )
    assert.False(t, ra.Add(m))

    // "now" metric
    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*50),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if atomic.LoadInt64(&a.sum) > 0 {
            break
        }
    }
    assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}

func TestAddAndPushOnePeriod(t *testing.T) {
    a := &TestAggregator{}
    ra := NewRunningAggregator(a, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"*"},
        },
        Period: time.Millisecond * 500,
    })
    assert.NoError(t, ra.Config.Filter.Compile())
    acc := testutil.Accumulator{}
    shutdown := make(chan struct{})

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        ra.Run(&acc, shutdown)
    }()

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now().Add(time.Millisecond*100),
    )
    assert.False(t, ra.Add(m))

    for {
        time.Sleep(time.Millisecond)
        if acc.NMetrics() > 0 {
            break
        }
    }
    acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})

    close(shutdown)
    wg.Wait()
}

func TestAddDropOriginal(t *testing.T) {
    ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
        Name: "TestRunningAggregator",
        Filter: Filter{
            NamePass: []string{"RI*"},
        },
        DropOriginal: true,
    })
    assert.NoError(t, ra.Config.Filter.Compile())

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now(),
    )
    assert.True(t, ra.Add(m))

    // this metric name doesn't match the filter, so Add will return false
    m2 := ra.MakeMetric(
        "foobar",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        time.Now(),
    )
    assert.False(t, ra.Add(m2))
}

// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
    now := time.Now()
    ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
        Name: "TestRunningAggregator",
    })
    assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())

    m := ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Untyped,
    )

    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Counter,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Counter,
    )

    m = ra.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Gauge,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Gauge,
    )
}

type TestAggregator struct {
    sum int64
}

func (t *TestAggregator) Description() string  { return "" }
func (t *TestAggregator) SampleConfig() string { return "" }
func (t *TestAggregator) Reset() {
    atomic.StoreInt64(&t.sum, 0)
}

func (t *TestAggregator) Push(acc telegraf.Accumulator) {
    acc.AddFields("TestMetric",
        map[string]interface{}{"sum": t.sum},
        map[string]string{},
    )
}

func (t *TestAggregator) Add(in telegraf.Metric) {
    for _, v := range in.Fields() {
        if vi, ok := v.(int64); ok {
            atomic.AddInt64(&t.sum, vi)
        }
    }
}
@@ -1,15 +1,19 @@
package models

import (
    "fmt"
    "time"

    "github.com/influxdata/telegraf"
)

type RunningInput struct {
    Name   string
    Input  telegraf.Input
    Config *InputConfig

    trace       bool
    debug       bool
    defaultTags map[string]string
}

// InputConfig containing a name, interval, and filter

@@ -22,3 +26,59 @@ type InputConfig struct {
    Filter   Filter
    Interval time.Duration
}

func (r *RunningInput) Name() string {
    return "inputs." + r.Config.Name
}

// MakeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
func (r *RunningInput) MakeMetric(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
    mType telegraf.ValueType,
    t time.Time,
) telegraf.Metric {
    m := makemetric(
        measurement,
        fields,
        tags,
        r.Config.NameOverride,
        r.Config.MeasurementPrefix,
        r.Config.MeasurementSuffix,
        r.Config.Tags,
        r.defaultTags,
        r.Config.Filter,
        true,
        r.debug,
        mType,
        t,
    )

    if r.trace && m != nil {
        fmt.Println("> " + m.String())
    }

    return m
}

func (r *RunningInput) Debug() bool {
    return r.debug
}

func (r *RunningInput) SetDebug(debug bool) {
    r.debug = debug
}

func (r *RunningInput) Trace() bool {
    return r.trace
}

func (r *RunningInput) SetTrace(trace bool) {
    r.trace = trace
}

func (r *RunningInput) SetDefaultTags(tags map[string]string) {
    r.defaultTags = tags
}

internal/models/running_input_test.go (new file, 352 lines)
@@ -0,0 +1,352 @@
package models

import (
    "fmt"
    "math"
    "testing"
    "time"

    "github.com/influxdata/telegraf"

    "github.com/stretchr/testify/assert"
)

func TestMakeMetricNoFields(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Nil(t, m)
}

// nil fields should get dropped
func TestMakeMetricNilFields(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "value": int(101),
            "nil":   nil,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
        m.String(),
    )
}

// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.Equal(t, "inputs.TestRunningInput", ri.Name())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Untyped,
    )

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Counter,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Counter,
    )

    m = ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Gauge,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
    assert.Equal(
        t,
        m.Type(),
        telegraf.Gauge,
    )
}

func TestMakeMetricWithPluginTags(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
            Tags: map[string]string{
                "foo": "bar",
            },
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        nil,
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricFilteredOut(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
            Tags: map[string]string{
                "foo": "bar",
            },
            Filter: Filter{NamePass: []string{"foobar"}},
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())
    assert.NoError(t, ri.Config.Filter.Compile())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        nil,
        telegraf.Untyped,
        now,
    )
    assert.Nil(t, m)
}

func TestMakeMetricWithDaemonTags(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDefaultTags(map[string]string{
        "foo": "bar",
    })
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest,foo=bar value=101i %d", now.UnixNano()),
    )
}

// Inf fields should get dropped
func TestMakeMetricInfFields(t *testing.T) {
    inf := math.Inf(1)
    ninf := math.Inf(-1)
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "value": int(101),
            "inf":   inf,
            "ninf":  ninf,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("RITest value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricAllFieldTypes(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name: "TestRunningInput",
        },
    }
    ri.SetDebug(true)
    assert.Equal(t, true, ri.Debug())
    ri.SetTrace(true)
    assert.Equal(t, true, ri.Trace())

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{
            "a": int(10),
            "b": int8(10),
            "c": int16(10),
            "d": int32(10),
            "e": uint(10),
            "f": uint8(10),
            "g": uint16(10),
            "h": uint32(10),
            "i": uint64(10),
            "j": float32(10),
            "k": uint64(9223372036854775810),
            "l": "foobar",
            "m": true,
        },
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        fmt.Sprintf("RITest a=10i,b=10i,c=10i,d=10i,e=10i,f=10i,g=10i,h=10i,i=10i,j=10,k=9223372036854775807i,l=\"foobar\",m=true %d", now.UnixNano()),
        m.String(),
    )
}

func TestMakeMetricNameOverride(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name:         "TestRunningInput",
            NameOverride: "foobar",
        },
    }

    m := ri.MakeMetric(
        "RITest",
        map[string]interface{}{"value": int(101)},
        map[string]string{},
        telegraf.Untyped,
        now,
    )
    assert.Equal(
        t,
        m.String(),
        fmt.Sprintf("foobar value=101i %d", now.UnixNano()),
    )
}

func TestMakeMetricNamePrefix(t *testing.T) {
    now := time.Now()
    ri := RunningInput{
        Config: &InputConfig{
            Name:              "TestRunningInput",
            MeasurementPrefix: "foobar_",
        },
    }

    m := ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.String(),
|
||||
fmt.Sprintf("foobar_RITest value=101i %d", now.UnixNano()),
|
||||
)
|
||||
}
|
||||
|
||||
func TestMakeMetricNameSuffix(t *testing.T) {
|
||||
now := time.Now()
|
||||
ri := RunningInput{
|
||||
Config: &InputConfig{
|
||||
Name: "TestRunningInput",
|
||||
MeasurementSuffix: "_foobar",
|
||||
},
|
||||
}
|
||||
|
||||
m := ri.MakeMetric(
|
||||
"RITest",
|
||||
map[string]interface{}{"value": int(101)},
|
||||
map[string]string{},
|
||||
telegraf.Untyped,
|
||||
now,
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
m.String(),
|
||||
fmt.Sprintf("RITest_foobar value=101i %d", now.UnixNano()),
|
||||
)
|
||||
}
|
||||
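Taken together, the tests above pin down how MakeMetric sanitizes fields before building a metric. A small self-contained sketch of that behavior follows; it is an illustrative restatement, not the MakeMetric source:

```go
package main

import (
	"fmt"
	"math"
)

// sanitize mimics the field handling the tests assert on: nil and ±Inf
// values are dropped, float32 is widened, and unsigned values above
// MaxInt64 are capped (9223372036854775810 becomes ...807).
func sanitize(fields map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(fields))
	for k, v := range fields {
		switch val := v.(type) {
		case nil:
			continue // "nil fields should get dropped"
		case float64:
			if math.IsInf(val, 0) {
				continue // line protocol cannot represent Inf
			}
			out[k] = val
		case float32:
			out[k] = float64(val)
		case uint64:
			if val > math.MaxInt64 {
				out[k] = int64(math.MaxInt64)
			} else {
				out[k] = int64(val)
			}
		default:
			out[k] = v
		}
	}
	return out
}

func main() {
	fmt.Println(sanitize(map[string]interface{}{
		"value": 101.0,
		"inf":   math.Inf(1),
		"big":   uint64(9223372036854775810),
		"nil":   nil,
	}))
}
```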
@@ -85,7 +85,7 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
    if !ro.Quiet {
        log.Printf("Output [%s] buffer fullness: %d / %d metrics. "+
        log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
            "Total gathered metrics: %d. Total dropped metrics: %d.",
            ro.Name,
            ro.failMetrics.Len()+ro.metrics.Len(),
@@ -142,7 +142,7 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
    elapsed := time.Since(start)
    if err == nil {
        if !ro.Quiet {
            log.Printf("Output [%s] wrote batch of %d metrics in %s\n",
            log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
                ro.Name, len(metrics), elapsed)
        }
    }
@@ -132,7 +132,6 @@ func TestRunningOutput_PassFilter(t *testing.T) {
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{

            TagInclude: []string{"nothing*"},
        },
    }
@@ -154,7 +153,6 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{

            TagExclude: []string{"tag*"},
        },
    }
@@ -176,7 +174,6 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) {
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{

            TagExclude: []string{"nothing*"},
        },
    }
@@ -198,7 +195,6 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
    conf := &OutputConfig{
        Filter: Filter{

            TagInclude: []string{"tag*"},
        },
    }
44 internal/models/running_processor.go (new file)
@@ -0,0 +1,44 @@
package models

import (
    "github.com/influxdata/telegraf"
)

type RunningProcessor struct {
    Name      string
    Processor telegraf.Processor
    Config    *ProcessorConfig
}

type RunningProcessors []*RunningProcessor

func (rp RunningProcessors) Len() int           { return len(rp) }
func (rp RunningProcessors) Swap(i, j int)      { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }

// ProcessorConfig containing a name, order, and filter
type ProcessorConfig struct {
    Name   string
    Order  int64
    Filter Filter
}

func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
    ret := []telegraf.Metric{}

    for _, metric := range in {
        if rp.Config.Filter.IsActive() {
            // check if the filter should be applied to this metric
            if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
                // this means the filter should not be applied
                ret = append(ret, metric)
                continue
            }
        }
        // This metric should pass through the filter, so call the processor's
        // Apply function and append results to the output slice.
        ret = append(ret, rp.Processor.Apply(metric)...)
    }

    return ret
}
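The Len, Swap, and Less methods above make RunningProcessors a sort.Interface; presumably the config loader sorts the chain by Order before metrics flow through it. A minimal sketch of that usage, with stand-in types rather than the real models package:

```go
package main

import (
	"fmt"
	"sort"
)

// Stand-ins for the models types above, trimmed to what sorting needs.
type ProcessorConfig struct{ Order int64 }

type RunningProcessor struct {
	Name   string
	Config *ProcessorConfig
}

type RunningProcessors []*RunningProcessor

func (rp RunningProcessors) Len() int           { return len(rp) }
func (rp RunningProcessors) Swap(i, j int)      { rp[i], rp[j] = rp[j], rp[i] }
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }

func main() {
	procs := RunningProcessors{
		{Name: "rename", Config: &ProcessorConfig{Order: 2}},
		{Name: "filter", Config: &ProcessorConfig{Order: 1}},
	}
	sort.Sort(procs)
	for _, p := range procs {
		fmt.Println(p.Name) // filter, then rename
	}
}
```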
117 internal/models/running_processor_test.go (new file)
@@ -0,0 +1,117 @@
package models

import (
    "testing"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/stretchr/testify/assert"
)

type TestProcessor struct {
}

func (f *TestProcessor) SampleConfig() string { return "" }
func (f *TestProcessor) Description() string  { return "" }

// Apply renames:
//   "foo" to "fuz"
//   "bar" to "baz"
// And it also drops measurements named "dropme"
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
    out := make([]telegraf.Metric, 0)
    for _, m := range in {
        switch m.Name() {
        case "foo":
            out = append(out, testutil.TestMetric(1, "fuz"))
        case "bar":
            out = append(out, testutil.TestMetric(1, "baz"))
        case "dropme":
            // drop the metric!
        default:
            out = append(out, m)
        }
    }
    return out
}

func NewTestRunningProcessor() *RunningProcessor {
    out := &RunningProcessor{
        Name:      "test",
        Processor: &TestProcessor{},
        Config:    &ProcessorConfig{Filter: Filter{}},
    }
    return out
}

func TestRunningProcessor(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
        testutil.TestMetric(1, "baz"),
    }

    expectedNames := []string{
        "fuz",
        "baz",
        "baz",
    }
    rfp := NewTestRunningProcessor()
    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
        filteredMetrics[2].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_WithNameDrop(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
        testutil.TestMetric(1, "baz"),
    }

    expectedNames := []string{
        "foo",
        "baz",
        "baz",
    }
    rfp := NewTestRunningProcessor()

    rfp.Config.Filter.NameDrop = []string{"foo"}
    assert.NoError(t, rfp.Config.Filter.Compile())

    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
        filteredMetrics[2].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}

func TestRunningProcessor_DroppedMetric(t *testing.T) {
    inmetrics := []telegraf.Metric{
        testutil.TestMetric(1, "dropme"),
        testutil.TestMetric(1, "foo"),
        testutil.TestMetric(1, "bar"),
    }

    expectedNames := []string{
        "fuz",
        "baz",
    }
    rfp := NewTestRunningProcessor()
    filteredMetrics := rfp.Apply(inmetrics...)

    actualNames := []string{
        filteredMetrics[0].Name(),
        filteredMetrics[1].Name(),
    }
    assert.Equal(t, expectedNames, actualNames)
}
58 logger/logger.go (new file)
@@ -0,0 +1,58 @@
package logger

import (
    "io"
    "log"
    "os"

    "github.com/influxdata/wlog"
)

// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
    return &telegrafLog{
        writer: wlog.NewWriter(w),
    }
}

type telegrafLog struct {
    writer io.Writer
}

func (t *telegrafLog) Write(p []byte) (n int, err error) {
    return t.writer.Write(p)
}

// SetupLogging configures the logging output.
//   debug   will set the log level to DEBUG
//   quiet   will set the log level to ERROR
//   logfile will direct the logging output to a file. Empty string is
//           interpreted as stderr. If there is an error opening the file
//           the logger will fall back to stderr.
func SetupLogging(debug, quiet bool, logfile string) {
    if debug {
        wlog.SetLevel(wlog.DEBUG)
    }
    if quiet {
        wlog.SetLevel(wlog.ERROR)
    }

    var oFile *os.File
    if logfile != "" {
        if _, err := os.Stat(logfile); os.IsNotExist(err) {
            if oFile, err = os.Create(logfile); err != nil {
                log.Printf("E! Unable to create %s (%s), using stderr", logfile, err)
                oFile = os.Stderr
            }
        } else {
            if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
                log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err)
                oFile = os.Stderr
            }
        }
    } else {
        oFile = os.Stderr
    }

    log.SetOutput(newTelegrafWriter(oFile))
}
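A hedged usage sketch for the new logger package: once SetupLogging installs the wlog-wrapped writer, the level prefixes used throughout this changeset ("D!", "I!", "E!") decide whether a line is emitted. The exact filtering semantics live in github.com/influxdata/wlog; this assumes its prefix-based level parsing:

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/logger"
)

func main() {
	// quiet=true sets the level to ERROR, so only "E!"-prefixed
	// lines should reach the output; "" means log to stderr.
	logger.SetupLogging(false, true, "")

	log.Printf("I! informational line, filtered out at ERROR level")
	log.Printf("E! error line, written to stderr")
}
```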
44 metric.go
@@ -4,6 +4,7 @@ import (
    "time"

    "github.com/influxdata/influxdb/client/v2"
    "github.com/influxdata/influxdb/models"
)

// ValueType is an enumeration of metric types that represent a simple value.
@@ -33,6 +34,10 @@ type Metric interface {
    // UnixNano returns the unix nano time of the metric
    UnixNano() int64

    // HashID returns a non-cryptographic hash of the metric (name + tags)
    // NOTE: do not persist & depend on this value to disk.
    HashID() uint64

    // Fields returns the fields for the metric
    Fields() map[string]interface{}

@@ -44,13 +49,28 @@ type Metric interface {

    // Point returns an influxdb client.Point object
    Point() *client.Point

    // SetAggregate sets the metric's aggregate status
    // This is so that aggregate metrics don't get re-sent to aggregator plugins
    SetAggregate(bool)
    // IsAggregate returns true if the metric is an aggregate
    IsAggregate() bool
}

// metric is a wrapper of the influxdb client.Point struct
type metric struct {
    pt *client.Point
    pt models.Point

    mType ValueType

    isaggregate bool
}

func NewMetricFromPoint(pt models.Point) Metric {
    return &metric{
        pt:    pt,
        mType: Untyped,
    }
}

// NewMetric returns an untyped metric.
@@ -60,7 +80,7 @@ func NewMetric(
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := client.NewPoint(name, tags, fields, t)
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
@@ -79,7 +99,7 @@ func NewGaugeMetric(
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := client.NewPoint(name, tags, fields, t)
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
@@ -98,7 +118,7 @@ func NewCounterMetric(
    fields map[string]interface{},
    t time.Time,
) (Metric, error) {
    pt, err := client.NewPoint(name, tags, fields, t)
    pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
    if err != nil {
        return nil, err
    }
@@ -113,7 +133,7 @@ func (m *metric) Name() string {
}

func (m *metric) Tags() map[string]string {
    return m.pt.Tags()
    return m.pt.Tags().Map()
}

func (m *metric) Time() time.Time {
@@ -124,6 +144,10 @@ func (m *metric) Type() ValueType {
    return m.mType
}

func (m *metric) HashID() uint64 {
    return m.pt.HashID()
}

func (m *metric) UnixNano() int64 {
    return m.pt.UnixNano()
}
@@ -141,5 +165,13 @@ func (m *metric) PrecisionString(precison string) string {
}

func (m *metric) Point() *client.Point {
    return m.pt
    return client.NewPointFrom(m.pt)
}

func (m *metric) IsAggregate() bool {
    return m.isaggregate
}

func (m *metric) SetAggregate(b bool) {
    m.isaggregate = b
}
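A small sketch of the new HashID method: per the comment above, it hashes only name and tags, so two samples of the same series share an ID. That is what lets an aggregator key its cache on it, as the minmax plugin below does. The tag set here is hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
)

func main() {
	tags := map[string]string{"host": "example"} // hypothetical series
	m1, _ := telegraf.NewMetric("cpu", tags, map[string]interface{}{"usage": 10.0}, time.Now())
	m2, _ := telegraf.NewMetric("cpu", tags, map[string]interface{}{"usage": 99.0}, time.Now())

	// Same name + tags, different fields and timestamps:
	fmt.Println(m1.HashID() == m2.HashID()) // expected: true
}
```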
5 plugins/aggregators/all/all.go (new file)
@@ -0,0 +1,5 @@
package all

import (
    _ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)
119 plugins/aggregators/minmax/minmax.go (new file)
@@ -0,0 +1,119 @@
package minmax

import (
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/aggregators"
)

type MinMax struct {
    cache map[uint64]aggregate
}

func NewMinMax() telegraf.Aggregator {
    mm := &MinMax{}
    mm.Reset()
    return mm
}

type aggregate struct {
    fields map[string]minmax
    name   string
    tags   map[string]string
}

type minmax struct {
    min float64
    max float64
}

var sampleConfig = `
  ## General Aggregator Arguments:
  ## The period on which to flush & clear the aggregator.
  period = "30s"
  ## If true, the original metric will be dropped by the
  ## aggregator and will not get sent to the output plugins.
  drop_original = false
`

func (m *MinMax) SampleConfig() string {
    return sampleConfig
}

func (m *MinMax) Description() string {
    return "Keep the aggregate min/max of each metric passing through."
}

func (m *MinMax) Add(in telegraf.Metric) {
    id := in.HashID()
    if _, ok := m.cache[id]; !ok {
        // hit an uncached metric, create caches for first time:
        a := aggregate{
            name:   in.Name(),
            tags:   in.Tags(),
            fields: make(map[string]minmax),
        }
        for k, v := range in.Fields() {
            if fv, ok := convert(v); ok {
                a.fields[k] = minmax{
                    min: fv,
                    max: fv,
                }
            }
        }
        m.cache[id] = a
    } else {
        for k, v := range in.Fields() {
            if fv, ok := convert(v); ok {
                if _, ok := m.cache[id].fields[k]; !ok {
                    // hit an uncached field of a cached metric
                    m.cache[id].fields[k] = minmax{
                        min: fv,
                        max: fv,
                    }
                    continue
                }
                if fv < m.cache[id].fields[k].min {
                    tmp := m.cache[id].fields[k]
                    tmp.min = fv
                    m.cache[id].fields[k] = tmp
                } else if fv > m.cache[id].fields[k].max {
                    tmp := m.cache[id].fields[k]
                    tmp.max = fv
                    m.cache[id].fields[k] = tmp
                }
            }
        }
    }
}

func (m *MinMax) Push(acc telegraf.Accumulator) {
    for _, aggregate := range m.cache {
        fields := map[string]interface{}{}
        for k, v := range aggregate.fields {
            fields[k+"_min"] = v.min
            fields[k+"_max"] = v.max
        }
        acc.AddFields(aggregate.name, fields, aggregate.tags)
    }
}

func (m *MinMax) Reset() {
    m.cache = make(map[uint64]aggregate)
}

func convert(in interface{}) (float64, bool) {
    switch v := in.(type) {
    case float64:
        return v, true
    case int64:
        return float64(v), true
    default:
        return 0, false
    }
}

func init() {
    aggregators.Add("minmax", func() telegraf.Aggregator {
        return NewMinMax()
    })
}
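Going by the sampleConfig string and the registry key above, enabling this aggregator in telegraf.conf would presumably look like the snippet below. The `[[aggregators.minmax]]` table name is inferred from the `aggregators.Add("minmax", ...)` registration, not quoted from the docs:

```toml
[[aggregators.minmax]]
  ## Flush & clear the aggregated min/max every 30s.
  period = "30s"
  ## Keep forwarding the original metrics alongside the *_min/*_max fields.
  drop_original = false
```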
162 plugins/aggregators/minmax/minmax_test.go (new file)
@@ -0,0 +1,162 @@
package minmax

import (
    "testing"
    "time"

    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"
)

var m1, _ = telegraf.NewMetric("m1",
    map[string]string{"foo": "bar"},
    map[string]interface{}{
        "a": int64(1),
        "b": int64(1),
        "c": int64(1),
        "d": int64(1),
        "e": int64(1),
        "f": float64(2),
        "g": float64(2),
        "h": float64(2),
        "i": float64(2),
        "j": float64(3),
    },
    time.Now(),
)
var m2, _ = telegraf.NewMetric("m1",
    map[string]string{"foo": "bar"},
    map[string]interface{}{
        "a":        int64(1),
        "b":        int64(3),
        "c":        int64(3),
        "d":        int64(3),
        "e":        int64(3),
        "f":        float64(1),
        "g":        float64(1),
        "h":        float64(1),
        "i":        float64(1),
        "j":        float64(1),
        "k":        float64(200),
        "ignoreme": "string",
        "andme":    true,
    },
    time.Now(),
)

func BenchmarkApply(b *testing.B) {
    minmax := NewMinMax()

    for n := 0; n < b.N; n++ {
        minmax.Add(m1)
        minmax.Add(m2)
    }
}

// Test two metrics getting added.
func TestMinMaxWithPeriod(t *testing.T) {
    acc := testutil.Accumulator{}
    minmax := NewMinMax()

    minmax.Add(m1)
    minmax.Add(m2)
    minmax.Push(&acc)

    expectedFields := map[string]interface{}{
        "a_max": float64(1),
        "a_min": float64(1),
        "b_max": float64(3),
        "b_min": float64(1),
        "c_max": float64(3),
        "c_min": float64(1),
        "d_max": float64(3),
        "d_min": float64(1),
        "e_max": float64(3),
        "e_min": float64(1),
        "f_max": float64(2),
        "f_min": float64(1),
        "g_max": float64(2),
        "g_min": float64(1),
        "h_max": float64(2),
        "h_min": float64(1),
        "i_max": float64(2),
        "i_min": float64(1),
        "j_max": float64(3),
        "j_min": float64(1),
        "k_max": float64(200),
        "k_min": float64(200),
    }
    expectedTags := map[string]string{
        "foo": "bar",
    }
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}

// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestMinMaxDifferentPeriods(t *testing.T) {
    acc := testutil.Accumulator{}
    minmax := NewMinMax()

    minmax.Add(m1)
    minmax.Push(&acc)
    expectedFields := map[string]interface{}{
        "a_max": float64(1),
        "a_min": float64(1),
        "b_max": float64(1),
        "b_min": float64(1),
        "c_max": float64(1),
        "c_min": float64(1),
        "d_max": float64(1),
        "d_min": float64(1),
        "e_max": float64(1),
        "e_min": float64(1),
        "f_max": float64(2),
        "f_min": float64(2),
        "g_max": float64(2),
        "g_min": float64(2),
        "h_max": float64(2),
        "h_min": float64(2),
        "i_max": float64(2),
        "i_min": float64(2),
        "j_max": float64(3),
        "j_min": float64(3),
    }
    expectedTags := map[string]string{
        "foo": "bar",
    }
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)

    acc.ClearMetrics()
    minmax.Reset()
    minmax.Add(m2)
    minmax.Push(&acc)
    expectedFields = map[string]interface{}{
        "a_max": float64(1),
        "a_min": float64(1),
        "b_max": float64(3),
        "b_min": float64(3),
        "c_max": float64(3),
        "c_min": float64(3),
        "d_max": float64(3),
        "d_min": float64(3),
        "e_max": float64(3),
        "e_min": float64(3),
        "f_max": float64(1),
        "f_min": float64(1),
        "g_max": float64(1),
        "g_min": float64(1),
        "h_max": float64(1),
        "h_min": float64(1),
        "i_max": float64(1),
        "i_min": float64(1),
        "j_max": float64(1),
        "j_min": float64(1),
        "k_max": float64(200),
        "k_min": float64(200),
    }
    expectedTags = map[string]string{
        "foo": "bar",
    }
    acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
11 plugins/aggregators/registry.go (new file)
@@ -0,0 +1,11 @@
package aggregators

import "github.com/influxdata/telegraf"

type Creator func() telegraf.Aggregator

var Aggregators = map[string]Creator{}

func Add(name string, creator Creator) {
    Aggregators[name] = creator
}
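A short sketch of how the registry is meant to be consumed: a plugin's init (like minmax's above) registers a Creator under a name, and a loader can then build fresh instances by key. The lookup here is illustrative; the real config loader lives elsewhere in the agent:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/aggregators"
	// Blank import runs each plugin's init(), which calls aggregators.Add.
	_ "github.com/influxdata/telegraf/plugins/aggregators/all"
)

func main() {
	creator, ok := aggregators.Aggregators["minmax"]
	if !ok {
		fmt.Println("minmax aggregator not registered")
		return
	}
	agg := creator() // a fresh telegraf.Aggregator
	fmt.Println(agg.Description())
}
```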
@@ -88,7 +88,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
            if err == nil {
                fields[strings.Replace(k, "-", "_", -1)] = val
            } else {
                log.Printf("skipping aerospike field %v with int64 overflow", k)
                log.Printf("I! skipping aerospike field %v with int64 overflow", k)
            }
        }
        acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -121,7 +121,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
            if err == nil {
                nFields[strings.Replace(parts[0], "-", "_", -1)] = val
            } else {
                log.Printf("skipping aerospike field %v with int64 overflow", parts[0])
                log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
            }
        }
        acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())
@@ -31,6 +31,7 @@ import (
    _ "github.com/influxdata/telegraf/plugins/inputs/iptables"
    _ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
    _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
    _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
    _ "github.com/influxdata/telegraf/plugins/inputs/leofs"
    _ "github.com/influxdata/telegraf/plugins/inputs/logparser"
    _ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
@@ -7,7 +7,7 @@

#### Description

The Cassandra plugin collects Cassandra/JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured.
The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured.

See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html)

@@ -38,9 +38,9 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster

####measurement = javaGarbageCollector

- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime
- /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionTime
- /java.lang:type=GarbageCollector,name=ParNew/CollectionCount

####measurement = javaMemory
@@ -50,13 +50,13 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster

####measurement = cassandraCache

- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hit
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Hits
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Entries
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size
- /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity
@@ -67,33 +67,33 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster

####measurement = cassandraClientRequest

- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures
- /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures

####measurement = cassandraCommitLog

- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks
- /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize

####measurement = cassandraCompaction

- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTask
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks
- /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks
- /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted
- /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted

####measurement = cassandraStorage

- /org.apache.cassandra.metrics:type=Storage,name=Load
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions
- /org.apache.cassandra.metrics:type=Storage,name=Exceptions

####measurement = cassandraTable
Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them.
@@ -101,25 +101,25 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=CoordinatorReadLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency


####measurement = cassandraThreadPools

- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks
- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks
@@ -274,7 +274,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
            m = newCassandraMetric(serverTokens["host"], metric, acc)
        } else {
            // unsupported metric type
            log.Printf("Unsupported Cassandra metric [%s], skipping",
            log.Printf("I! Unsupported Cassandra metric [%s], skipping",
                metric)
            continue
        }
@@ -100,12 +100,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
    for _, s := range sockets {
        dump, err := perfDump(c.CephBinary, s)
        if err != nil {
            log.Printf("error reading from socket '%s': %v", s.socket, err)
            log.Printf("E! error reading from socket '%s': %v", s.socket, err)
            continue
        }
        data, err := parseDump(dump)
        if err != nil {
            log.Printf("error parsing dump from socket '%s': %v", s.socket, err)
            log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
            continue
        }
        for tag, metrics := range *data {
@@ -293,7 +293,7 @@ func flatten(data interface{}) []*metric {
        }
    }
    default:
        log.Printf("Ignoring unexpected type '%T' for value %v", val, val)
        log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val)
    }

    return metrics
@@ -2,6 +2,10 @@

This input plugin will capture specific statistics per cgroup.

Consider restricting paths to the set of cgroups you really
want to monitor if you have a large number of cgroups, to avoid
any cardinality issues.

The following file formats are supported:

* Single value

@@ -33,9 +37,8 @@ KEY1 VAL1\n

### Tags:

Measurements don't have any specific tags unless you define them at the telegraf level (defaults). We
used to have the path listed as a tag, but to keep cardinality in check it's easier to move this
value to a field. Thanks @sebito91!
All measurements have the following tags:
- path


### Configuration:
@@ -11,15 +11,18 @@ type CGroup struct {
}

var sampleConfig = `
  ## Directories in which to look for files, globs are supported.
  # paths = [
  #   "/cgroup/memory",
  #   "/cgroup/memory/child1",
  #   "/cgroup/memory/child2/*",
  # ]
  ## cgroup stat fields, as file names, globs are supported.
  ## these file names are appended to each path from above.
  # files = ["memory.*usage*", "memory.limit_in_bytes"]
  ## Directories in which to look for files, globs are supported.
  ## Consider restricting paths to the set of cgroups you really
  ## want to monitor if you have a large number of cgroups, to avoid
  ## any cardinality issues.
  # paths = [
  #   "/cgroup/memory",
  #   "/cgroup/memory/child1",
  #   "/cgroup/memory/child2/*",
  # ]
  ## cgroup stat fields, as file names, globs are supported.
  ## these file names are appended to each path from above.
  # files = ["memory.*usage*", "memory.limit_in_bytes"]
`

func (g *CGroup) SampleConfig() string {
@@ -56,9 +56,10 @@ func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error {
            return err
        }
    }
    fields["path"] = dir

    acc.AddFields(metricName, fields, nil)
    tags := map[string]string{"path": dir}

    acc.AddFields(metricName, fields, tags)

    return nil
}
@@ -3,13 +3,10 @@
package cgroup

import (
    "fmt"
    "testing"

    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "reflect"
)

var cg1 = &CGroup{
@@ -24,32 +21,15 @@ var cg1 = &CGroup{
    },
}

func assertContainsFields(a *testutil.Accumulator, t *testing.T, measurement string, fieldSet []map[string]interface{}) {
    a.Lock()
    defer a.Unlock()

    numEquals := 0
    for _, p := range a.Metrics {
        if p.Measurement == measurement {
            for _, fields := range fieldSet {
                if reflect.DeepEqual(fields, p.Fields) {
                    numEquals++
                }
            }
        }
    }

    if numEquals != len(fieldSet) {
        assert.Fail(t, fmt.Sprintf("only %d of %d are equal", numEquals, len(fieldSet)))
    }
}

func TestCgroupStatistics_1(t *testing.T) {
    var acc testutil.Accumulator

    err := cg1.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory",
    }
    fields := map[string]interface{}{
        "memory.stat.cache": 1739362304123123123,
        "memory.stat.rss":   1775325184,
@@ -62,9 +42,8 @@ func TestCgroupStatistics_1(t *testing.T) {
        "memory.limit_in_bytes": 223372036854771712,
        "memory.use_hierarchy":  "12-781",
        "notify_on_release":     0,
        "path":                  "testdata/memory",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================
@@ -80,14 +59,16 @@ func TestCgroupStatistics_2(t *testing.T) {
    err := cg2.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/cpu",
    }
    fields := map[string]interface{}{
        "cpuacct.usage_percpu.0": -1452543795404,
        "cpuacct.usage_percpu.1": 1376681271659,
        "cpuacct.usage_percpu.2": 1450950799997,
        "cpuacct.usage_percpu.3": -1473113374257,
        "path":                   "testdata/cpu",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================
@@ -103,16 +84,18 @@ func TestCgroupStatistics_3(t *testing.T) {
    err := cg3.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1",
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2",
    tags = map[string]string{
        "path": "testdata/memory/group_2",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================
@@ -128,22 +111,23 @@ func TestCgroupStatistics_4(t *testing.T) {
    err := cg4.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1/group_1_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_1",
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_2",
    tags = map[string]string{
        "path": "testdata/memory/group_1/group_1_2",
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

    fieldsThree := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2",
    tags = map[string]string{
        "path": "testdata/memory/group_2",
    }

    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo, fieldsThree})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================
@@ -159,16 +143,18 @@ func TestCgroupStatistics_5(t *testing.T) {
    err := cg5.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory/group_1/group_1_1",
    }
    fields := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_1/group_1_1",
    }
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)

    fieldsTwo := map[string]interface{}{
        "memory.limit_in_bytes": 223372036854771712,
        "path":                  "testdata/memory/group_2/group_1_1",
    tags = map[string]string{
        "path": "testdata/memory/group_2/group_1_1",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields, fieldsTwo})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

// ======================================================================
@@ -184,11 +170,13 @@ func TestCgroupStatistics_6(t *testing.T) {
    err := cg6.Gather(&acc)
    require.NoError(t, err)

    tags := map[string]string{
        "path": "testdata/memory",
    }
    fields := map[string]interface{}{
        "memory.usage_in_bytes":      3513667584,
        "memory.use_hierarchy":       "12-781",
        "memory.kmem.limit_in_bytes": 9223372036854771712,
        "path":                       "testdata/memory",
    }
    assertContainsFields(&acc, t, "cgroup", []map[string]interface{}{fields})
    acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -103,9 +103,13 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
            tags["stratum"] = valueFields[0]
            continue
        }
        if strings.Contains(strings.ToLower(name), "reference_id") {
            tags["reference_id"] = valueFields[0]
            continue
        }
        value, err := strconv.ParseFloat(valueFields[0], 64)
        if err != nil {
            tags[name] = strings.ToLower(valueFields[0])
            tags[name] = strings.ToLower(strings.Join(valueFields, " "))
            continue
        }
        if strings.Contains(stats[1], "slow") {

@@ -27,7 +27,7 @@ func TestGather(t *testing.T) {

    tags := map[string]string{
        "reference_id": "192.168.1.22",
        "leap_status":  "normal",
        "leap_status":  "not synchronized",
        "stratum":      "3",
    }
    fields := map[string]interface{}{
@@ -85,7 +85,7 @@ Skew : 0.006 ppm
Root delay : 0.001655 seconds
Root dispersion : 0.003307 seconds
Update interval : 507.2 seconds
Leap status : Normal
Leap status : Not synchronized
`

args := os.Args
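The fix above changes the non-numeric fallback to join all value fields before tagging, instead of keeping only the first word. A tiny restatement of that parsing path, runnable on its own:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	fields := map[string]interface{}{}
	tags := map[string]string{}

	for name, raw := range map[string]string{
		"frequency":   "-16.000",
		"leap_status": "Not synchronized", // two words: the old code kept only "not"
	} {
		valueFields := strings.Fields(raw)
		if v, err := strconv.ParseFloat(valueFields[0], 64); err == nil {
			fields[name] = v
			continue
		}
		tags[name] = strings.ToLower(strings.Join(valueFields, " "))
	}

	fmt.Println(fields) // map[frequency:-16]
	fmt.Println(tags)   // map[leap_status:not synchronized]
}
```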
@@ -18,21 +18,28 @@ API endpoint. In the following order the plugin will attempt to authenticate.
```toml
[[inputs.cloudwatch]]
  ## Amazon Region (required)
  region = 'us-east-1'
  region = "us-east-1"

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
  #
  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'
  period = "5m"

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'
  delay = "5m"

  ## Override global run interval (optional - defaults to global interval)
  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'
  interval = "5m"

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'
  namespace = "AWS/ELB"

  ## Maximum requests per second. Note that the global default AWS rate limit is
  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -43,16 +50,16 @@ API endpoint. In the following order the plugin will attempt to authenticate.
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  [[inputs.cloudwatch.metrics]]
    names = ['Latency', 'RequestCount']
    names = ["Latency", "RequestCount"]

    ## Dimension filters for Metric (optional)
    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'LoadBalancerName'
      value = 'p-example'
      name = "LoadBalancerName"
      value = "p-example"

    [[inputs.cloudwatch.metrics.dimensions]]
      name = 'AvailabilityZone'
      value = '*'
      name = "AvailabilityZone"
      value = "*"
```
#### Requirements and Terminology

@@ -71,16 +78,16 @@ wildcard dimension is ignored.
Example:
```
[[inputs.cloudwatch.metrics]]
  names = ['Latency']
  names = ["Latency"]

  ## Dimension filters for Metric (optional)
  [[inputs.cloudwatch.metrics.dimensions]]
    name = 'LoadBalancerName'
    value = 'p-example'
    name = "LoadBalancerName"
    value = "p-example"

  [[inputs.cloudwatch.metrics.dimensions]]
    name = 'AvailabilityZone'
    value = '*'
    name = "AvailabilityZone"
    value = "*"
```

If the following ELBs are available:
@@ -63,7 +63,7 @@ type (
func (c *CloudWatch) SampleConfig() string {
    return `
  ## Amazon Region
  region = 'us-east-1'
  region = "us-east-1"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
@@ -80,22 +80,29 @@ func (c *CloudWatch) SampleConfig() string {
  #profile = ""
  #shared_credential_file = ""

  # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  # metrics are made available to the 1 minute period. Some are collected at
  # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  # Note that if a period is configured that is smaller than the minimum for a
  # particular metric, that metric will not be returned by the Cloudwatch API
  # and will not be collected by Telegraf.
  #
  ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  period = '1m'
  period = "5m"

  ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  delay = '1m'
  delay = "5m"

  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  ## gaps or overlap in pulled data
  interval = '1m'
  interval = "5m"

  ## Configure the TTL for the internal cache of metrics.
  ## Defaults to 1 hr if not specified
  #cache_ttl = '10m'
  #cache_ttl = "10m"

  ## Metric Statistic Namespace (required)
  namespace = 'AWS/ELB'
  namespace = "AWS/ELB"

  ## Maximum requests per second. Note that the global default AWS rate limit is
  ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
@@ -106,12 +113,12 @@ func (c *CloudWatch) SampleConfig() string {
  ## Defaults to all Metrics in Namespace if nothing is provided
  ## Refreshes Namespace available metrics every 1h
  #[[inputs.cloudwatch.metrics]]
  #  names = ['Latency', 'RequestCount']
  #  names = ["Latency", "RequestCount"]
  #
  #  ## Dimension filters for Metric (optional)
  #  [[inputs.cloudwatch.metrics.dimensions]]
  #    name = 'LoadBalancerName'
  #    value = 'p-example'
  #    name = "LoadBalancerName"
  #    value = "p-example"
`
}

@@ -133,7 +140,6 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
        if !hasWilcard(m.Dimensions) {
            dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
            for k, d := range m.Dimensions {
                fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
                dimensions[k] = &cloudwatch.Dimension{
                    Name:  aws.String(d.Name),
                    Value: aws.String(d.Value),
@@ -229,13 +235,12 @@ func (c *CloudWatch) initializeCloudWatch() error {
/*
 * Fetch available metrics for given CloudWatch Namespace
 */
func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err error) {
func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) {
    if c.metricCache != nil && c.metricCache.IsValid() {
        metrics = c.metricCache.Metrics
        return
        return c.metricCache.Metrics, nil
    }

    metrics = []*cloudwatch.Metric{}
    metrics := []*cloudwatch.Metric{}

    var token *string
    for more := true; more; {
@@ -263,7 +268,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() (metrics []*cloudwatch.Metric, err
        TTL: c.CacheTTL.Duration,
    }

    return
    return metrics, nil
}

/*
@@ -93,13 +93,14 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error {

        contents, err := ioutil.ReadFile(fName)
        if err != nil {
            log.Printf("failed to read file '%s': %v", fName, err)
            log.Printf("E! failed to read file '%s': %v", fName, err)
            continue
        }

        v := strings.TrimSpace(string(contents))
        fields[metricKey], err = strconv.ParseFloat(v, 64)
        if err != nil {
            log.Printf("failed to parse metric, expected number but "+
            log.Printf("E! failed to parse metric, expected number but "+
                " found '%s': %v", v, err)
        }
    }
@@ -103,6 +103,9 @@ based on the availability of per-cpu stats on your system.
- n_used_file_descriptors
- n_cpus
- n_containers
- n_containers_running
- n_containers_stopped
- n_containers_paused
- n_images
- n_goroutines
- n_listener_events
@@ -153,6 +156,9 @@ based on the availability of per-cpu stats on your system.
> docker n_cpus=8i 1456926671065383978
> docker n_used_file_descriptors=15i 1456926671065383978
> docker n_containers=7i 1456926671065383978
> docker n_containers_running=7i 1456926671065383978
> docker n_containers_stopped=3i 1456926671065383978
> docker n_containers_paused=0i 1456926671065383978
> docker n_images=152i 1456926671065383978
> docker n_goroutines=36i 1456926671065383978
> docker n_listener_events=0i 1456926671065383978
@@ -126,7 +126,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error {
            defer wg.Done()
            err := d.gatherContainer(c, acc)
            if err != nil {
                log.Printf("Error gathering container %s stats: %s\n",
                log.Printf("E! Error gathering container %s stats: %s\n",
                    c.Names, err.Error())
            }
        }(container)
@@ -154,6 +154,9 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
        "n_cpus":                  info.NCPU,
        "n_used_file_descriptors": info.NFd,
        "n_containers":            info.Containers,
        "n_containers_running":    info.ContainersRunning,
        "n_containers_stopped":    info.ContainersStopped,
        "n_containers_paused":     info.ContainersPaused,
        "n_images":                info.Images,
        "n_goroutines":            info.NGoroutines,
        "n_listener_events":       info.NEventsListener,

@@ -256,6 +256,9 @@ type FakeDockerClient struct {
func (d FakeDockerClient) Info(ctx context.Context) (types.Info, error) {
    env := types.Info{
        Containers:        108,
        ContainersRunning: 98,
        ContainersStopped: 6,
        ContainersPaused:  3,
        OomKillDisable:    false,
        SystemTime:        "2016-02-24T00:55:09.15073105-05:00",
        NEventsListener:   0,
@@ -397,6 +400,9 @@ func TestDockerGatherInfo(t *testing.T) {
        "n_cpus":                  int(4),
        "n_used_file_descriptors": int(19),
        "n_containers":            int(108),
        "n_containers_running":    int(98),
        "n_containers_stopped":    int(6),
        "n_containers_paused":     int(3),
        "n_images":                int(199),
        "n_goroutines":            int(39),
    },
37 plugins/inputs/haproxy/README.md (new file)
@@ -0,0 +1,37 @@
|
||||
# HAproxy Input Plugin
|
||||
|
||||
[HAproxy](http://www.haproxy.org/) input plugin gathers metrics directly from any running HAproxy instance. It can do so by using CSV generated by HAproxy status page or from admin socket(s).
|
||||
|
||||
### Configuration:
|
||||
|
||||
```toml
|
||||
# SampleConfig
|
||||
[[inputs.haproxy]]
|
||||
servers = ["http://1.2.3.4/haproxy?stats", "/var/run/haproxy*.sock"]
|
||||
```
|
||||
|
||||
Server addresses need to explicitly start with 'http' if you wish to use HAproxy status page. Otherwise, address will be assumed to be an UNIX socket and protocol (if present) will be discarded.
|
||||
|
||||
Following examples will all resolve to the same socket:
|
||||
```
|
||||
socket:/var/run/haproxy.sock
|
||||
unix:/var/run/haproxy.sock
|
||||
foo:/var/run/haproxy.sock
|
||||
/var/run/haproxy.sock
|
||||
```
|
||||
|
||||
When using socket names, wildcard expansion is supported so plugin can gather stats from multiple sockets at once.
|
||||
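For example, to gather from every admin socket matching a pattern (a hypothetical path; adjust to your setup), a config like this should work:

```toml
[[inputs.haproxy]]
  ## Hypothetical glob; matches e.g. /var/run/haproxy1.sock and /var/run/haproxy2.sock
  servers = ["/var/run/haproxy*.sock"]
```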

If no servers are specified, then the default address of `http://127.0.0.1:1936/haproxy?stats` will be used.

### Measurements & Fields:

The plugin gathers the measurements outlined in the [HAproxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1).

### Tags:

- All measurements have the following tags:
  - server - address of the server data is gathered from
  - proxy - proxy name as reported in `pxname`
  - sv - service name as reported in `svname`

@@ -7,6 +7,7 @@ import (
	"net"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
@@ -17,7 +18,7 @@ import (
	"github.com/influxdata/telegraf/plugins/inputs"
)

//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
//CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1
const (
	HF_PXNAME = 0 // 0. pxname [LFBS]: proxy name
	HF_SVNAME = 1 // 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener)
@@ -93,12 +94,15 @@ var sampleConfig = `
  ## An array of addresses to gather stats about. Specify an ip or hostname
  ## with optional port. ie localhost, 10.10.3.33:1936, etc.
  ## Make sure you specify the complete path to the stats endpoint
  ## ie 10.10.3.33:1936/haproxy?stats
  ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
  #
  ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
  servers = ["http://myhaproxy.com:1936/haproxy?stats"]
  ## Or you can also use local socket
  ## servers = ["socket:/run/haproxy/admin.sock"]
  ##
  ## You can also use local socket with standard wildcard globbing.
  ## Server address not starting with 'http' will be treated as a possible
  ## socket, so both examples below are valid.
  ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
`

func (r *haproxy) SampleConfig() string {
@@ -116,10 +120,36 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
		return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
	}

	endpoints := make([]string, 0, len(g.Servers))

	for _, endpoint := range g.Servers {

		if strings.HasPrefix(endpoint, "http") {
			endpoints = append(endpoints, endpoint)
			continue
		}

		socketPath := getSocketAddr(endpoint)

		matches, err := filepath.Glob(socketPath)

		if err != nil {
			return err
		}

		if len(matches) == 0 {
			endpoints = append(endpoints, socketPath)
		} else {
			for _, match := range matches {
				endpoints = append(endpoints, match)
			}
		}
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(g.Servers))
	wg.Add(len(g.Servers))
	for _, server := range g.Servers {
	errChan := errchan.New(len(endpoints))
	wg.Add(len(endpoints))
	for _, server := range endpoints {
		go func(serv string) {
			defer wg.Done()
			errChan.C <- g.gatherServer(serv, acc)
@@ -131,14 +161,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error {
}

func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error {
	var socketPath string
	socketAddr := strings.Split(addr, ":")

	if len(socketAddr) >= 2 {
		socketPath = socketAddr[1]
	} else {
		socketPath = socketAddr[0]
	}
	socketPath := getSocketAddr(addr)

	c, err := net.Dial("unix", socketPath)

@@ -196,6 +219,16 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error {
	return importCsvResult(res.Body, acc, u.Host)
}

func getSocketAddr(sock string) string {
	socketAddr := strings.Split(sock, ":")

	if len(socketAddr) >= 2 {
		return socketAddr[1]
	} else {
		return socketAddr[0]
	}
}

func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
	csv := csv.NewReader(r)
	result, err := csv.ReadAll()

@@ -72,38 +72,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) {
		"sv": "host0",
	}

	fields := map[string]interface{}{
		"active_servers": uint64(1),
		"backup_servers": uint64(0),
		"bin": uint64(510913516),
		"bout": uint64(2193856571),
		"check_duration": uint64(10),
		"cli_abort": uint64(73),
		"ctime": uint64(2),
		"downtime": uint64(0),
		"dresp": uint64(0),
		"econ": uint64(0),
		"eresp": uint64(1),
		"http_response.1xx": uint64(0),
		"http_response.2xx": uint64(119534),
		"http_response.3xx": uint64(48051),
		"http_response.4xx": uint64(2345),
		"http_response.5xx": uint64(1056),
		"lbtot": uint64(171013),
		"qcur": uint64(0),
		"qmax": uint64(0),
		"qtime": uint64(0),
		"rate": uint64(3),
		"rate_max": uint64(12),
		"rtime": uint64(312),
		"scur": uint64(1),
		"smax": uint64(32),
		"srv_abort": uint64(1),
		"stot": uint64(171014),
		"ttime": uint64(2341),
		"wredis": uint64(0),
		"wretr": uint64(1),
	}
	fields := HaproxyGetFieldValues()
	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)

	//Here, we should get error because we don't pass authentication data
@@ -136,102 +105,58 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) {
		"sv": "host0",
	}

	fields := map[string]interface{}{
		"active_servers": uint64(1),
		"backup_servers": uint64(0),
		"bin": uint64(510913516),
		"bout": uint64(2193856571),
		"check_duration": uint64(10),
		"cli_abort": uint64(73),
		"ctime": uint64(2),
		"downtime": uint64(0),
		"dresp": uint64(0),
		"econ": uint64(0),
		"eresp": uint64(1),
		"http_response.1xx": uint64(0),
		"http_response.2xx": uint64(119534),
		"http_response.3xx": uint64(48051),
		"http_response.4xx": uint64(2345),
		"http_response.5xx": uint64(1056),
		"lbtot": uint64(171013),
		"qcur": uint64(0),
		"qmax": uint64(0),
		"qtime": uint64(0),
		"rate": uint64(3),
		"rate_max": uint64(12),
		"rtime": uint64(312),
		"scur": uint64(1),
		"smax": uint64(32),
		"srv_abort": uint64(1),
		"stot": uint64(171014),
		"ttime": uint64(2341),
		"wredis": uint64(0),
		"wretr": uint64(1),
	}
	fields := HaproxyGetFieldValues()
	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
}

func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) {
	var randomNumber int64
	binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
	sock, err := net.Listen("unix", fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber))
	if err != nil {
		t.Fatal("Cannot initialize socket ")
	var sockets [5]net.Listener
	_globmask := "/tmp/test-haproxy*.sock"
	_badmask := "/tmp/test-fail-haproxy*.sock"

	for i := 0; i < 5; i++ {
		binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
		sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber)

		sock, err := net.Listen("unix", sockname)
		if err != nil {
			t.Fatal("Cannot initialize socket ")
		}

		sockets[i] = sock
		defer sock.Close()

		s := statServer{}
		go s.serverSocket(sock)
	}

	defer sock.Close()

	s := statServer{}
	go s.serverSocket(sock)

	r := &haproxy{
		Servers: []string{sock.Addr().String()},
		Servers: []string{_globmask},
	}

	var acc testutil.Accumulator

	err = r.Gather(&acc)
	err := r.Gather(&acc)
	require.NoError(t, err)

	tags := map[string]string{
		"proxy": "be_app",
		"server": sock.Addr().String(),
		"sv": "host0",
	fields := HaproxyGetFieldValues()

	for _, sock := range sockets {
		tags := map[string]string{
			"proxy": "be_app",
			"server": sock.Addr().String(),
			"sv": "host0",
		}

		acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
	}

	fields := map[string]interface{}{
		"active_servers": uint64(1),
		"backup_servers": uint64(0),
		"bin": uint64(510913516),
		"bout": uint64(2193856571),
		"check_duration": uint64(10),
		"cli_abort": uint64(73),
		"ctime": uint64(2),
		"downtime": uint64(0),
		"dresp": uint64(0),
		"econ": uint64(0),
		"eresp": uint64(1),
		"http_response.1xx": uint64(0),
		"http_response.2xx": uint64(119534),
		"http_response.3xx": uint64(48051),
		"http_response.4xx": uint64(2345),
		"http_response.5xx": uint64(1056),
		"lbtot": uint64(171013),
		"qcur": uint64(0),
		"qmax": uint64(0),
		"qtime": uint64(0),
		"rate": uint64(3),
		"rate_max": uint64(12),
		"rtime": uint64(312),
		"scur": uint64(1),
		"smax": uint64(32),
		"srv_abort": uint64(1),
		"stot": uint64(171014),
		"ttime": uint64(2341),
		"wredis": uint64(0),
		"wretr": uint64(1),
	}
	acc.AssertContainsTaggedFields(t, "haproxy", fields, tags)
	// This mask should not match any socket
	r.Servers = []string{_badmask}

	err = r.Gather(&acc)
	require.Error(t, err)
}

//When not passing server config, we default to localhost
@@ -246,6 +171,42 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) {
	assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv")
}

func HaproxyGetFieldValues() map[string]interface{} {
	fields := map[string]interface{}{
		"active_servers": uint64(1),
		"backup_servers": uint64(0),
		"bin": uint64(510913516),
		"bout": uint64(2193856571),
		"check_duration": uint64(10),
		"cli_abort": uint64(73),
		"ctime": uint64(2),
		"downtime": uint64(0),
		"dresp": uint64(0),
		"econ": uint64(0),
		"eresp": uint64(1),
		"http_response.1xx": uint64(0),
		"http_response.2xx": uint64(119534),
		"http_response.3xx": uint64(48051),
		"http_response.4xx": uint64(2345),
		"http_response.5xx": uint64(1056),
		"lbtot": uint64(171013),
		"qcur": uint64(0),
		"qmax": uint64(0),
		"qtime": uint64(0),
		"rate": uint64(3),
		"rate_max": uint64(12),
		"rtime": uint64(312),
		"scur": uint64(1),
		"smax": uint64(32),
		"srv_abort": uint64(1),
		"stot": uint64(171014),
		"ttime": uint64(2341),
		"wredis": uint64(0),
		"wretr": uint64(1),
	}
	return fields
}

const csvOutputSample = `
# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
fe_app,FRONTEND,,81,288,713,2000,1094063,5557055817,24096715169,1102,80,95740,,,17,19,OPEN,,,,,,,,,2,16,113,13,114,,0,18,0,102,,,,0,1314093,537036,123452,11966,1360,,35,140,1987928,,,0,0,0,0,,,,,,,,

43	plugins/inputs/http_listener/bufferpool.go	Normal file
@@ -0,0 +1,43 @@
package http_listener

import (
	"sync/atomic"
)

type pool struct {
	buffers chan []byte
	size int

	created int64
}

// NewPool returns a new pool object.
// n is the number of buffers
// bufSize is the size (in bytes) of each buffer
func NewPool(n, bufSize int) *pool {
	return &pool{
		buffers: make(chan []byte, n),
		size: bufSize,
	}
}

func (p *pool) get() []byte {
	select {
	case b := <-p.buffers:
		return b
	default:
		atomic.AddInt64(&p.created, 1)
		return make([]byte, p.size)
	}
}

func (p *pool) put(b []byte) {
	select {
	case p.buffers <- b:
	default:
	}
}

func (p *pool) ncreated() int64 {
	return atomic.LoadInt64(&p.created)
}
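A minimal sketch of the intended usage, assuming it sits in the same `http_listener` package as the pool above:

```go
package http_listener

import "fmt"

func ExamplePoolUsage() {
	p := NewPool(2, 1024) // at most 2 pooled buffers, 1 KB each
	b := p.get()          // pool is empty, so a fresh 1 KB buffer is allocated
	p.put(b)              // hand the buffer back so it can be reused
	b = p.get()           // reuses the buffer we just returned; no new allocation
	p.put(b)
	fmt.Println(len(b), p.ncreated()) // 1024 1
}
```

Note the non-blocking `select` in both `get` and `put`: under burst load the pool allocates extra buffers rather than stalling, and `ncreated` tracks how many were ever allocated.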
@@ -1,7 +1,9 @@
package http_listener

import (
	"io/ioutil"
	"bytes"
	"compress/gzip"
	"io"
	"log"
	"net"
	"net/http"
@@ -11,126 +13,137 @@ import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/inputs/http_listener/stoppableListener"
	"github.com/influxdata/telegraf/plugins/parsers"
	"github.com/influxdata/telegraf/plugins/parsers/influx"
)

type HttpListener struct {
const (
	// DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes.
	// if the request body is over this size, we will return an HTTP 413 error.
	// 500 MB
	DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024

	// MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for
	// a single InfluxDB point.
	// 64 KB
	DEFAULT_MAX_LINE_SIZE = 64 * 1024
)

type HTTPListener struct {
	ServiceAddress string
	ReadTimeout internal.Duration
	WriteTimeout internal.Duration
	MaxBodySize int64
	MaxLineSize int

	sync.Mutex
	mu sync.Mutex
	wg sync.WaitGroup

	listener *stoppableListener.StoppableListener
	listener net.Listener

	parser parsers.Parser
	parser influx.InfluxParser
	acc telegraf.Accumulator
	pool *pool
}

const sampleConfig = `
  ## Address and port to host HTTP listener on
  service_address = ":8186"

  ## timeouts
  ## maximum duration before timing out read of the request
  read_timeout = "10s"
  ## maximum duration before timing out write of the response
  write_timeout = "10s"

  ## Maximum allowed http request body size in bytes.
  ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
  max_body_size = 0

  ## Maximum line size allowed to be sent in bytes.
  ## 0 means to use the default of 65536 bytes (64 kibibytes)
  max_line_size = 0
`

func (t *HttpListener) SampleConfig() string {
func (h *HTTPListener) SampleConfig() string {
	return sampleConfig
}

func (t *HttpListener) Description() string {
func (h *HTTPListener) Description() string {
	return "Influx HTTP write listener"
}

func (t *HttpListener) Gather(_ telegraf.Accumulator) error {
func (h *HTTPListener) Gather(_ telegraf.Accumulator) error {
	log.Printf("D! The http_listener has created %d buffers", h.pool.ncreated())
	return nil
}

func (t *HttpListener) SetParser(parser parsers.Parser) {
	t.parser = parser
}

// Start starts the http listener service.
func (t *HttpListener) Start(acc telegraf.Accumulator) error {
	t.Lock()
	defer t.Unlock()
func (h *HTTPListener) Start(acc telegraf.Accumulator) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	t.acc = acc
	if h.MaxBodySize == 0 {
		h.MaxBodySize = DEFAULT_MAX_BODY_SIZE
	}
	if h.MaxLineSize == 0 {
		h.MaxLineSize = DEFAULT_MAX_LINE_SIZE
	}

	var rawListener, err = net.Listen("tcp", t.ServiceAddress)
	if err != nil {
		return err
	}
	t.listener, err = stoppableListener.New(rawListener)
	h.acc = acc
	h.pool = NewPool(200, h.MaxLineSize)

	var listener, err = net.Listen("tcp", h.ServiceAddress)
	if err != nil {
		return err
	}
	h.listener = listener

	go t.httpListen()
	h.wg.Add(1)
	go func() {
		defer h.wg.Done()
		h.httpListen()
	}()

	log.Printf("Started HTTP listener service on %s\n", t.ServiceAddress)
	log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress)

	return nil
}

// Stop cleans up all resources
func (t *HttpListener) Stop() {
	t.Lock()
	defer t.Unlock()
func (h *HTTPListener) Stop() {
	h.mu.Lock()
	defer h.mu.Unlock()

	t.listener.Stop()
	t.listener.Close()
	h.listener.Close()
	h.wg.Wait()

	t.wg.Wait()

	log.Println("Stopped HTTP listener service on ", t.ServiceAddress)
	log.Println("I! Stopped HTTP listener service on ", h.ServiceAddress)
}

// httpListen listens for HTTP requests.
func (t *HttpListener) httpListen() error {
	if t.ReadTimeout.Duration < time.Second {
		t.ReadTimeout.Duration = time.Second * 10
// httpListen sets up an http.Server and calls server.Serve.
// like server.Serve, httpListen will always return a non-nil error, for this
// reason, the error returned should probably be ignored.
// see https://golang.org/pkg/net/http/#Server.Serve
func (h *HTTPListener) httpListen() error {
	if h.ReadTimeout.Duration < time.Second {
		h.ReadTimeout.Duration = time.Second * 10
	}
	if t.WriteTimeout.Duration < time.Second {
		t.WriteTimeout.Duration = time.Second * 10
	if h.WriteTimeout.Duration < time.Second {
		h.WriteTimeout.Duration = time.Second * 10
	}

	var server = http.Server{
		Handler: t,
		ReadTimeout: t.ReadTimeout.Duration,
		WriteTimeout: t.WriteTimeout.Duration,
		Handler: h,
		ReadTimeout: h.ReadTimeout.Duration,
		WriteTimeout: h.WriteTimeout.Duration,
	}

	return server.Serve(t.listener)
	return server.Serve(h.listener)
}

func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	t.wg.Add(1)
	defer t.wg.Done()
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		log.Printf("Problem reading request: [%s], Error: %s\n", string(body), err)
		http.Error(res, "ERROR reading request", http.StatusInternalServerError)
		return
	}

func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/write":
		var metrics []telegraf.Metric
		metrics, err = t.parser.Parse(body)
		if err == nil {
			for _, m := range metrics {
				t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
			}
			res.WriteHeader(http.StatusNoContent)
		} else {
			log.Printf("Problem parsing body: [%s], Error: %s\n", string(body), err)
			http.Error(res, "ERROR parsing metrics", http.StatusInternalServerError)
		}
		h.serveWrite(res, req)
	case "/query":
		// Deliver a dummy response to the query endpoint, as some InfluxDB
		// clients test endpoint availability with a query
@@ -147,8 +160,135 @@ func (t *HttpListener) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	}
}

func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
	// Check that the content length is not too large for us to handle.
	if req.ContentLength > h.MaxBodySize {
		tooLarge(res)
		return
	}
	now := time.Now()

	// Handle gzip request bodies
	body := req.Body
	var err error
	if req.Header.Get("Content-Encoding") == "gzip" {
		body, err = gzip.NewReader(req.Body)
		defer body.Close()
		if err != nil {
			log.Println("E! " + err.Error())
			badRequest(res)
			return
		}
	}
	body = http.MaxBytesReader(res, body, h.MaxBodySize)

	var return400 bool
	var hangingBytes bool
	buf := h.pool.get()
	defer h.pool.put(buf)
	bufStart := 0
	for {
		n, err := io.ReadFull(body, buf[bufStart:])
		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
			log.Println("E! " + err.Error())
			// problem reading the request body
			badRequest(res)
			return
		}

		if err == io.EOF {
			if return400 {
				badRequest(res)
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		if hangingBytes {
			i := bytes.IndexByte(buf, '\n')
			if i == -1 {
				// still didn't find a newline, keep scanning
				continue
			}
			// rotate the bit remaining after the first newline to the front of the buffer
			i++ // start copying after the newline
			bufStart = len(buf) - i
			if bufStart > 0 {
				copy(buf, buf[i:])
			}
			hangingBytes = false
			continue
		}

		if err == io.ErrUnexpectedEOF {
			// finished reading the request body
			if err := h.parse(buf[:n+bufStart], now); err != nil {
				log.Println("E! " + err.Error())
				return400 = true
			}
			if return400 {
				badRequest(res)
			} else {
				res.WriteHeader(http.StatusNoContent)
			}
			return
		}

		// if we got down here it means that we filled our buffer, and there
		// are still bytes remaining to be read. So we will parse up until the
		// final newline, then push the rest of the bytes into the next buffer.
		i := bytes.LastIndexByte(buf, '\n')
		if i == -1 {
			// drop any line longer than the max buffer size
			log.Printf("E! http_listener received a single line longer than the maximum of %d bytes",
				len(buf))
			hangingBytes = true
			return400 = true
			bufStart = 0
			continue
		}
		if err := h.parse(buf[:i], now); err != nil {
			log.Println("E! " + err.Error())
			return400 = true
		}
		// rotate the bit remaining after the last newline to the front of the buffer
		i++ // start copying after the newline
		bufStart = len(buf) - i
		if bufStart > 0 {
			copy(buf, buf[i:])
		}
	}
}

func (h *HTTPListener) parse(b []byte, t time.Time) error {
	metrics, err := h.parser.ParseWithDefaultTime(b, t)

	for _, m := range metrics {
		h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}

	return err
}

func tooLarge(res http.ResponseWriter) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	res.WriteHeader(http.StatusRequestEntityTooLarge)
	res.Write([]byte(`{"error":"http: request body too large"}`))
}

func badRequest(res http.ResponseWriter) {
	res.Header().Set("Content-Type", "application/json")
	res.Header().Set("X-Influxdb-Version", "1.0")
	res.WriteHeader(http.StatusBadRequest)
	res.Write([]byte(`{"error":"http: bad request"}`))
}

func init() {
	inputs.Add("http_listener", func() telegraf.Input {
		return &HttpListener{}
		return &HTTPListener{
			ServiceAddress: ":8186",
		}
	})
}

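For reference, a minimal client for the listener above might look like the following. This is a sketch, not part of the plugin: the address assumes the default `service_address = ":8186"`, and the sample point is made up.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// POST one line-protocol point to the listener's /write endpoint.
	body := strings.NewReader("cpu,host=server01 usage_idle=90.5\n")
	resp, err := http.Post("http://localhost:8186/write", "text/plain", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}
```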
File diff suppressed because one or more lines are too long
@@ -1,10 +0,0 @@
Copyright (c) 2014, Eric Urban
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,62 +0,0 @@
package stoppableListener

import (
	"errors"
	"net"
	"time"
)

type StoppableListener struct {
	*net.TCPListener //Wrapped listener
	stop chan int //Channel used only to indicate listener should shutdown
}

func New(l net.Listener) (*StoppableListener, error) {
	tcpL, ok := l.(*net.TCPListener)

	if !ok {
		return nil, errors.New("Cannot wrap listener")
	}

	retval := &StoppableListener{}
	retval.TCPListener = tcpL
	retval.stop = make(chan int)

	return retval, nil
}

var StoppedError = errors.New("Listener stopped")

func (sl *StoppableListener) Accept() (net.Conn, error) {

	for {
		//Wait up to one second for a new connection
		sl.SetDeadline(time.Now().Add(time.Second))

		newConn, err := sl.TCPListener.Accept()

		//Check for the channel being closed
		select {
		case <-sl.stop:
			return nil, StoppedError
		default:
			//If the channel is still open, continue as normal
		}

		if err != nil {
			netErr, ok := err.(net.Error)

			//If this is a timeout, then continue to wait for
			//new connections
			if ok && netErr.Timeout() && netErr.Temporary() {
				continue
			}
		}

		return newConn, err
	}
}

func (sl *StoppableListener) Stop() {
	close(sl.stop)
}
BIN	plugins/inputs/http_listener/testdata/testmsgs.gz	vendored	Normal file
Binary file not shown.
@@ -10,11 +10,16 @@ import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type InfluxDB struct {
	URLs []string `toml:"urls"`

	Timeout internal.Duration

	client *http.Client
}

func (*InfluxDB) Description() string {
@@ -32,6 +37,9 @@ func (*InfluxDB) SampleConfig() string {
  urls = [
    "http://localhost:8086/debug/vars"
  ]

  ## http request & header timeout
  timeout = "5s"
`
}

@@ -39,6 +47,16 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error {
	if len(i.URLs) == 0 {
		i.URLs = []string{"http://localhost:8086/debug/vars"}
	}

	if i.client == nil {
		i.client = &http.Client{
			Transport: &http.Transport{
				ResponseHeaderTimeout: i.Timeout.Duration,
			},
			Timeout: i.Timeout.Duration,
		}
	}

	errorChannel := make(chan error, len(i.URLs))

	var wg sync.WaitGroup
@@ -104,15 +122,6 @@ type memstats struct {
	GCCPUFraction float64 `json:"GCCPUFraction"`
}

var tr = &http.Transport{
	ResponseHeaderTimeout: time.Duration(3 * time.Second),
}

var client = &http.Client{
	Transport: tr,
	Timeout: time.Duration(4 * time.Second),
}

// Gathers data from a particular URL
// Parameters:
//     acc    : The telegraf Accumulator to use
@@ -127,7 +136,7 @@ func (i *InfluxDB) gatherURL(
	shardCounter := 0
	now := time.Now()

	resp, err := client.Get(url)
	resp, err := i.client.Get(url)
	if err != nil {
		return err
	}
@@ -248,6 +257,8 @@ func (i *InfluxDB) gatherURL(

func init() {
	inputs.Add("influxdb", func() telegraf.Input {
		return &InfluxDB{}
		return &InfluxDB{
			Timeout: internal.Duration{Duration: time.Second * 5},
		}
	})
}

@@ -90,7 +90,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {
	case "newest":
		config.Offsets.Initial = sarama.OffsetNewest
	default:
		log.Printf("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
		log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n",
			k.Offset)
		config.Offsets.Initial = sarama.OffsetOldest
	}
@@ -115,7 +115,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error {

	// Start the kafka message reader
	go k.receiver()
	log.Printf("Started the kafka consumer service, peers: %v, topics: %v\n",
	log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n",
		k.ZookeeperPeers, k.Topics)
	return nil
}
@@ -128,11 +128,13 @@ func (k *Kafka) receiver() {
		case <-k.done:
			return
		case err := <-k.errs:
			log.Printf("Kafka Consumer Error: %s\n", err.Error())
			if err != nil {
				log.Printf("E! Kafka Consumer Error: %s\n", err)
			}
		case msg := <-k.in:
			metrics, err := k.parser.Parse(msg.Value)
			if err != nil {
				log.Printf("KAFKA PARSE ERROR\nmessage: %s\nerror: %s",
				log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s",
					string(msg.Value), err.Error())
			}

@@ -156,7 +158,7 @@ func (k *Kafka) Stop() {
	defer k.Unlock()
	close(k.done)
	if err := k.Consumer.Close(); err != nil {
		log.Printf("Error closing kafka consumer: %s\n", err.Error())
		log.Printf("E! Error closing kafka consumer: %s\n", err.Error())
	}
}

265	plugins/inputs/kubernetes/README.md	Normal file
@@ -0,0 +1,265 @@
# Kubernetes Input Plugin

**This plugin is experimental and may cause high cardinality issues with moderate to large Kubernetes deployments**

This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet.

To find the ip address of the host you are running on, you can issue a command like the following:
```
$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP'
```
In this case we used the downward API to pass in `$POD_NAMESPACE`; `$HOSTNAME` is the hostname of the pod, which is set by the kubernetes API.
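A minimal telegraf configuration for this plugin, pointed at the local kubelet, mirrors the plugin's sample config (the URL below is a placeholder):

```toml
[[inputs.kubernetes]]
  ## URL for the kubelet
  url = "http://1.1.1.1:10255"

  ## Use bearer token for authorization
  # bearer_token = /path/to/bearer/token

  ## Optional SSL Config
  # ssl_ca = /path/to/cafile
  # ssl_cert = /path/to/certfile
  # ssl_key = /path/to/keyfile
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
```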

## Summary Data

```json
{
  "node": {
    "nodeName": "node1",
    "systemContainers": [
      {
        "name": "kubelet",
        "startTime": "2016-08-25T18:46:52Z",
        "cpu": {
          "time": "2016-09-27T16:57:31Z",
          "usageNanoCores": 56652446,
          "usageCoreNanoSeconds": 101437561712262
        },
        "memory": {
          "time": "2016-09-27T16:57:31Z",
          "usageBytes": 62529536,
          "workingSetBytes": 62349312,
          "rssBytes": 47509504,
          "pageFaults": 4769397409,
          "majorPageFaults": 13
        },
        "rootfs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "logs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "userDefinedMetrics": null
      },
      {
        "name": "bar",
        "startTime": "2016-08-25T18:46:52Z",
        "cpu": {
          "time": "2016-09-27T16:57:31Z",
          "usageNanoCores": 56652446,
          "usageCoreNanoSeconds": 101437561712262
        },
        "memory": {
          "time": "2016-09-27T16:57:31Z",
          "usageBytes": 62529536,
          "workingSetBytes": 62349312,
          "rssBytes": 47509504,
          "pageFaults": 4769397409,
          "majorPageFaults": 13
        },
        "rootfs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "logs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "userDefinedMetrics": null
      }
    ],
    "startTime": "2016-08-25T18:46:52Z",
    "cpu": {
      "time": "2016-09-27T16:57:41Z",
      "usageNanoCores": 576996212,
      "usageCoreNanoSeconds": 774129887054161
    },
    "memory": {
      "time": "2016-09-27T16:57:41Z",
      "availableBytes": 10726387712,
      "usageBytes": 12313182208,
      "workingSetBytes": 5081538560,
      "rssBytes": 35586048,
      "pageFaults": 351742,
      "majorPageFaults": 1236
    },
    "network": {
      "time": "2016-09-27T16:57:41Z",
      "rxBytes": 213281337459,
      "rxErrors": 0,
      "txBytes": 292869995684,
      "txErrors": 0
    },
    "fs": {
      "availableBytes": 84379979776,
      "capacityBytes": 105553100800,
      "usedBytes": 16754286592
    },
    "runtime": {
      "imageFs": {
        "availableBytes": 84379979776,
        "capacityBytes": 105553100800,
        "usedBytes": 5809371475
      }
    }
  },
  "pods": [
    {
      "podRef": {
        "name": "foopod",
        "namespace": "foons",
        "uid": "6d305b06-8419-11e6-825c-42010af000ae"
      },
      "startTime": "2016-09-26T18:45:42Z",
      "containers": [
        {
          "name": "foocontainer",
          "startTime": "2016-09-26T18:46:43Z",
          "cpu": {
            "time": "2016-09-27T16:57:32Z",
            "usageNanoCores": 846503,
            "usageCoreNanoSeconds": 56507553554
          },
          "memory": {
            "time": "2016-09-27T16:57:32Z",
            "usageBytes": 30789632,
            "workingSetBytes": 30789632,
            "rssBytes": 30695424,
            "pageFaults": 10761,
            "majorPageFaults": 0
          },
          "rootfs": {
            "availableBytes": 84379979776,
            "capacityBytes": 105553100800,
            "usedBytes": 57344
          },
          "logs": {
            "availableBytes": 84379979776,
            "capacityBytes": 105553100800,
            "usedBytes": 24576
          },
          "userDefinedMetrics": null
        }
      ],
      "network": {
        "time": "2016-09-27T16:57:34Z",
        "rxBytes": 70749124,
        "rxErrors": 0,
        "txBytes": 47813506,
        "txErrors": 0
      },
      "volume": [
        {
          "availableBytes": 7903948800,
          "capacityBytes": 7903961088,
          "usedBytes": 12288,
          "name": "volume1"
        },
        {
          "availableBytes": 7903956992,
          "capacityBytes": 7903961088,
          "usedBytes": 4096,
          "name": "volume2"
        },
        {
          "availableBytes": 7903948800,
          "capacityBytes": 7903961088,
          "usedBytes": 12288,
          "name": "volume3"
        },
        {
          "availableBytes": 7903952896,
          "capacityBytes": 7903961088,
          "usedBytes": 8192,
          "name": "volume4"
        }
      ]
    }
  ]
}
```

### Daemonset YAML

```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: telegraf
  namespace: telegraf
spec:
  template:
    metadata:
      labels:
        app: telegraf
    spec:
      serviceAccount: telegraf
      containers:
      - name: telegraf
        image: quay.io/org/image:latest
        imagePullPolicy: IfNotPresent
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "HOST_PROC"
          value: "/rootfs/proc"
        - name: "HOST_SYS"
          value: "/rootfs/sys"
        volumeMounts:
        - name: sysro
          mountPath: /rootfs/sys
          readOnly: true
        - name: procro
          mountPath: /rootfs/proc
          readOnly: true
        - name: varrunutmpro
          mountPath: /var/run/utmp
          readOnly: true
        - name: logger-redis-creds
          mountPath: /var/run/secrets/deis/redis/creds
      volumes:
      - name: sysro
        hostPath:
          path: /sys
      - name: procro
        hostPath:
          path: /proc
      - name: varrunutmpro
        hostPath:
          path: /var/run/utmp
```

### Line Protocol

#### kubernetes_pod_container
```
kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal,
container_name=deis-controller,namespace=deis,
node_name=ip-10-0-0-0.ec2.internal, pod_name=deis-controller-3058870187-xazsr, cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,
logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,
logsfs_used_bytes=20787200i,memory_major_page_faults=0i,
memory_page_faults=175i,memory_rss_bytes=0i,
memory_usage_bytes=0i,memory_working_set_bytes=0i,
rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,
rootfs_used_bytes=1110016i 1476477530000000000
```

#### kubernetes_pod_volume
```
kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts,
namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal,
pod_name=kubernetes-dashboard-v1.1.1-t4x4t, available_bytes=8415240192i,
capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000
```

#### kubernetes_pod_network
```
kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis,
node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr,
rx_bytes=120671099i,rx_errors=0i,
tx_bytes=102451983i,tx_errors=0i 1476477530000000000
```
242	plugins/inputs/kubernetes/kubernetes.go	Normal file
@@ -0,0 +1,242 @@
package kubernetes

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	"github.com/influxdata/telegraf/internal/errchan"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Kubernetes represents the config object for the plugin
type Kubernetes struct {
	URL string

	// Bearer Token authorization file path
	BearerToken string `toml:"bearer_token"`

	// Path to CA file
	SSLCA string `toml:"ssl_ca"`
	// Path to host cert file
	SSLCert string `toml:"ssl_cert"`
	// Path to cert key file
	SSLKey string `toml:"ssl_key"`
	// Use SSL but skip chain & host verification
	InsecureSkipVerify bool

	RoundTripper http.RoundTripper
}

var sampleConfig = `
  ## URL for the kubelet
  url = "http://1.1.1.1:10255"

  ## Use bearer token for authorization
  # bearer_token = /path/to/bearer/token

  ## Optional SSL Config
  # ssl_ca = /path/to/cafile
  # ssl_cert = /path/to/certfile
  # ssl_key = /path/to/keyfile
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false
`

const (
	summaryEndpoint = `%s/stats/summary`
)

func init() {
	inputs.Add("kubernetes", func() telegraf.Input {
		return &Kubernetes{}
	})
}

//SampleConfig returns a sample config
func (k *Kubernetes) SampleConfig() string {
	return sampleConfig
}

//Description returns the description of this plugin
func (k *Kubernetes) Description() string {
	return "Read metrics from the kubernetes kubelet api"
}

//Gather collects kubernetes metrics from a given URL
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	errChan := errchan.New(1)
	wg.Add(1)
	go func(k *Kubernetes) {
		defer wg.Done()
		errChan.C <- k.gatherSummary(k.URL, acc)
	}(k)
	wg.Wait()
	return errChan.Error()
}

func buildURL(endpoint string, base string) (*url.URL, error) {
	u := fmt.Sprintf(endpoint, base)
	addr, err := url.Parse(u)
	if err != nil {
		return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err)
	}
	return addr, nil
}

func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error {
	url := fmt.Sprintf("%s/stats/summary", baseURL)
	var req, err = http.NewRequest("GET", url, nil)
	var token []byte
	var resp *http.Response

	tlsCfg, err := internal.GetTLSConfig(k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
	if err != nil {
		return err
	}

	if k.RoundTripper == nil {
		k.RoundTripper = &http.Transport{
			TLSHandshakeTimeout: 5 * time.Second,
			TLSClientConfig: tlsCfg,
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
		}
	}

	if k.BearerToken != "" {
		token, err = ioutil.ReadFile(k.BearerToken)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", "Bearer "+string(token))
	}

	resp, err = k.RoundTripper.RoundTrip(req)
	if err != nil {
		return fmt.Errorf("error making HTTP request to %s: %s", url, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
	}

	summaryMetrics := &SummaryMetrics{}
	err = json.NewDecoder(resp.Body).Decode(summaryMetrics)
	if err != nil {
		return fmt.Errorf(`Error parsing response: %s`, err)
	}
	buildSystemContainerMetrics(summaryMetrics, acc)
	buildNodeMetrics(summaryMetrics, acc)
	buildPodMetrics(summaryMetrics, acc)
	return nil
}

func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
	for _, container := range summaryMetrics.Node.SystemContainers {
		tags := map[string]string{
			"node_name": summaryMetrics.Node.NodeName,
			"container_name": container.Name,
		}
		fields := make(map[string]interface{})
		fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores
		fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds
		fields["memory_usage_bytes"] = container.Memory.UsageBytes
		fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes
		fields["memory_rss_bytes"] = container.Memory.RSSBytes
		fields["memory_page_faults"] = container.Memory.PageFaults
		fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
		fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
		fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
		fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
		fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
		acc.AddFields("kubernetes_system_container", fields, tags)
	}
}

func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
	tags := map[string]string{
		"node_name": summaryMetrics.Node.NodeName,
	}
	fields := make(map[string]interface{})
	fields["cpu_usage_nanocores"] = summaryMetrics.Node.CPU.UsageNanoCores
	fields["cpu_usage_core_nanoseconds"] = summaryMetrics.Node.CPU.UsageCoreNanoSeconds
	fields["memory_available_bytes"] = summaryMetrics.Node.Memory.AvailableBytes
	fields["memory_usage_bytes"] = summaryMetrics.Node.Memory.UsageBytes
	fields["memory_working_set_bytes"] = summaryMetrics.Node.Memory.WorkingSetBytes
	fields["memory_rss_bytes"] = summaryMetrics.Node.Memory.RSSBytes
	fields["memory_page_faults"] = summaryMetrics.Node.Memory.PageFaults
	fields["memory_major_page_faults"] = summaryMetrics.Node.Memory.MajorPageFaults
	fields["network_rx_bytes"] = summaryMetrics.Node.Network.RXBytes
	fields["network_rx_errors"] = summaryMetrics.Node.Network.RXErrors
	fields["network_tx_bytes"] = summaryMetrics.Node.Network.TXBytes
	fields["network_tx_errors"] = summaryMetrics.Node.Network.TXErrors
	fields["fs_available_bytes"] = summaryMetrics.Node.FileSystem.AvailableBytes
	fields["fs_capacity_bytes"] = summaryMetrics.Node.FileSystem.CapacityBytes
	fields["fs_used_bytes"] = summaryMetrics.Node.FileSystem.UsedBytes
	fields["runtime_image_fs_available_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.AvailableBytes
	fields["runtime_image_fs_capacity_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.CapacityBytes
	fields["runtime_image_fs_used_bytes"] = summaryMetrics.Node.Runtime.ImageFileSystem.UsedBytes
	acc.AddFields("kubernetes_node", fields, tags)
}

func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) {
	for _, pod := range summaryMetrics.Pods {
		for _, container := range pod.Containers {
			tags := map[string]string{
				"node_name": summaryMetrics.Node.NodeName,
				"namespace": pod.PodRef.Namespace,
				"container_name": container.Name,
				"pod_name": pod.PodRef.Name,
			}
			fields := make(map[string]interface{})
			fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores
			fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds
			fields["memory_usage_bytes"] = container.Memory.UsageBytes
			fields["memory_working_set_bytes"] = container.Memory.WorkingSetBytes
			fields["memory_rss_bytes"] = container.Memory.RSSBytes
			fields["memory_page_faults"] = container.Memory.PageFaults
			fields["memory_major_page_faults"] = container.Memory.MajorPageFaults
			fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes
			fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes
			fields["rootfs_used_bytes"] = container.RootFS.UsedBytes
			fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes
			fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes
			fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes
			acc.AddFields("kubernetes_pod_container", fields, tags)
		}

		for _, volume := range pod.Volumes {
			tags := map[string]string{
				"node_name": summaryMetrics.Node.NodeName,
				"pod_name": pod.PodRef.Name,
				"namespace": pod.PodRef.Namespace,
				"volume_name": volume.Name,
			}
			fields := make(map[string]interface{})
			fields["available_bytes"] = volume.AvailableBytes
			fields["capacity_bytes"] = volume.CapacityBytes
			fields["used_bytes"] = volume.UsedBytes
			acc.AddFields("kubernetes_pod_volume", fields, tags)
		}

		tags := map[string]string{
			"node_name": summaryMetrics.Node.NodeName,
			"pod_name": pod.PodRef.Name,
			"namespace": pod.PodRef.Namespace,
		}
		fields := make(map[string]interface{})
		fields["rx_bytes"] = pod.Network.RXBytes
		fields["rx_errors"] = pod.Network.RXErrors
		fields["tx_bytes"] = pod.Network.TXBytes
		fields["tx_errors"] = pod.Network.TXErrors
		acc.AddFields("kubernetes_pod_network", fields, tags)
	}
}
93	plugins/inputs/kubernetes/kubernetes_metrics.go	Normal file
@@ -0,0 +1,93 @@
package kubernetes

import "time"

// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet
type SummaryMetrics struct {
	Node NodeMetrics `json:"node"`
	Pods []PodMetrics `json:"pods"`
}

// NodeMetrics represents detailed information about a node
type NodeMetrics struct {
	NodeName string `json:"nodeName"`
	SystemContainers []ContainerMetrics `json:"systemContainers"`
	StartTime time.Time `json:"startTime"`
	CPU CPUMetrics `json:"cpu"`
	Memory MemoryMetrics `json:"memory"`
	Network NetworkMetrics `json:"network"`
	FileSystem FileSystemMetrics `json:"fs"`
	Runtime RuntimeMetrics `json:"runtime"`
}

// ContainerMetrics represents the metric data collected about a container from the kubelet
type ContainerMetrics struct {
	Name string `json:"name"`
	StartTime time.Time `json:"startTime"`
	CPU CPUMetrics `json:"cpu"`
	Memory MemoryMetrics `json:"memory"`
	RootFS FileSystemMetrics `json:"rootfs"`
	LogsFS FileSystemMetrics `json:"logs"`
}

// RuntimeMetrics contains metric data on the runtime of the system
type RuntimeMetrics struct {
	ImageFileSystem FileSystemMetrics `json:"imageFs"`
}

// CPUMetrics represents the cpu usage data of a pod or node
type CPUMetrics struct {
	Time time.Time `json:"time"`
	UsageNanoCores int64 `json:"usageNanoCores"`
	UsageCoreNanoSeconds int64 `json:"usageCoreNanoSeconds"`
}

// PodMetrics contains metric data on a given pod
type PodMetrics struct {
	PodRef PodReference `json:"podRef"`
	StartTime time.Time `json:"startTime"`
	Containers []ContainerMetrics `json:"containers"`
	Network NetworkMetrics `json:"network"`
	Volumes []VolumeMetrics `json:"volume"`
}

// PodReference is how a pod is identified
type PodReference struct {
	Name string `json:"name"`
	Namespace string `json:"namespace"`
}

// MemoryMetrics represents the memory metrics for a pod or node
type MemoryMetrics struct {
	Time time.Time `json:"time"`
	AvailableBytes int64 `json:"availableBytes"`
	UsageBytes int64 `json:"usageBytes"`
	WorkingSetBytes int64 `json:"workingSetBytes"`
	RSSBytes int64 `json:"rssBytes"`
	PageFaults int64 `json:"pageFaults"`
	MajorPageFaults int64 `json:"majorPageFaults"`
}

// FileSystemMetrics represents disk usage metrics for a pod or node
type FileSystemMetrics struct {
	AvailableBytes int64 `json:"availableBytes"`
	CapacityBytes int64 `json:"capacityBytes"`
	UsedBytes int64 `json:"usedBytes"`
}

// NetworkMetrics represents network usage data for a pod or node
type NetworkMetrics struct {
	Time time.Time `json:"time"`
	RXBytes int64 `json:"rxBytes"`
	RXErrors int64 `json:"rxErrors"`
	TXBytes int64 `json:"txBytes"`
	TXErrors int64 `json:"txErrors"`
}

// VolumeMetrics represents the disk usage data for a given volume
type VolumeMetrics struct {
	Name string `json:"name"`
	AvailableBytes int64 `json:"availableBytes"`
	CapacityBytes int64 `json:"capacityBytes"`
	UsedBytes int64 `json:"usedBytes"`
}
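As a quick illustration of how these types map onto the kubelet response, a sketch (assumes it lives in the same package as the types above; the payload is trimmed):

```go
package kubernetes

import (
	"encoding/json"
	"fmt"
	"strings"
)

func ExampleSummaryDecode() {
	raw := `{"node": {"nodeName": "node1"}, "pods": []}`
	var s SummaryMetrics
	// The kubelet's /stats/summary JSON decodes directly into SummaryMetrics.
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Println(s.Node.NodeName) // node1
}
```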
289	plugins/inputs/kubernetes/kubernetes_test.go	Normal file
@@ -0,0 +1,289 @@
package kubernetes

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

func TestKubernetesStats(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, response)
	}))
	defer ts.Close()

	k := &Kubernetes{
		URL: ts.URL,
	}

	var acc testutil.Accumulator
	err := k.Gather(&acc)
	require.NoError(t, err)

	fields := map[string]interface{}{
		"cpu_usage_nanocores": int64(56652446),
		"cpu_usage_core_nanoseconds": int64(101437561712262),
		"memory_usage_bytes": int64(62529536),
		"memory_working_set_bytes": int64(62349312),
		"memory_rss_bytes": int64(47509504),
		"memory_page_faults": int64(4769397409),
		"memory_major_page_faults": int64(13),
		"rootfs_available_bytes": int64(84379979776),
		"rootfs_capacity_bytes": int64(105553100800),
		"logsfs_avaialble_bytes": int64(84379979776),
		"logsfs_capacity_bytes": int64(105553100800),
	}
	tags := map[string]string{
		"node_name": "node1",
		"container_name": "kubelet",
	}
	acc.AssertContainsTaggedFields(t, "kubernetes_system_container", fields, tags)

	fields = map[string]interface{}{
		"cpu_usage_nanocores": int64(576996212),
		"cpu_usage_core_nanoseconds": int64(774129887054161),
		"memory_usage_bytes": int64(12313182208),
		"memory_working_set_bytes": int64(5081538560),
		"memory_rss_bytes": int64(35586048),
		"memory_page_faults": int64(351742),
		"memory_major_page_faults": int64(1236),
		"memory_available_bytes": int64(10726387712),
		"network_rx_bytes": int64(213281337459),
		"network_rx_errors": int64(0),
		"network_tx_bytes": int64(292869995684),
		"network_tx_errors": int64(0),
		"fs_available_bytes": int64(84379979776),
		"fs_capacity_bytes": int64(105553100800),
		"fs_used_bytes": int64(16754286592),
		"runtime_image_fs_available_bytes": int64(84379979776),
		"runtime_image_fs_capacity_bytes": int64(105553100800),
		"runtime_image_fs_used_bytes": int64(5809371475),
	}
	tags = map[string]string{
		"node_name": "node1",
	}
	acc.AssertContainsTaggedFields(t, "kubernetes_node", fields, tags)

	fields = map[string]interface{}{
		"cpu_usage_nanocores": int64(846503),
		"cpu_usage_core_nanoseconds": int64(56507553554),
		"memory_usage_bytes": int64(30789632),
		"memory_working_set_bytes": int64(30789632),
		"memory_rss_bytes": int64(30695424),
		"memory_page_faults": int64(10761),
		"memory_major_page_faults": int64(0),
		"rootfs_available_bytes": int64(84379979776),
		"rootfs_capacity_bytes": int64(105553100800),
		"rootfs_used_bytes": int64(57344),
		"logsfs_avaialble_bytes": int64(84379979776),
		"logsfs_capacity_bytes": int64(105553100800),
		"logsfs_used_bytes": int64(24576),
	}
	tags = map[string]string{
		"node_name": "node1",
		"container_name": "foocontainer",
		"namespace": "foons",
		"pod_name": "foopod",
	}
	acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags)

	fields = map[string]interface{}{
		"available_bytes": int64(7903948800),
		"capacity_bytes": int64(7903961088),
		"used_bytes": int64(12288),
	}
	tags = map[string]string{
		"node_name": "node1",
		"volume_name": "volume1",
		"namespace": "foons",
		"pod_name": "foopod",
	}
	acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags)

	fields = map[string]interface{}{
		"rx_bytes": int64(70749124),
		"rx_errors": int64(0),
		"tx_bytes": int64(47813506),
		"tx_errors": int64(0),
	}
	tags = map[string]string{
		"node_name": "node1",
		"namespace": "foons",
		"pod_name": "foopod",
	}
	acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags)

}

var response = `
{
  "node": {
    "nodeName": "node1",
    "systemContainers": [
      {
        "name": "kubelet",
        "startTime": "2016-08-25T18:46:52Z",
        "cpu": {
          "time": "2016-09-27T16:57:31Z",
          "usageNanoCores": 56652446,
          "usageCoreNanoSeconds": 101437561712262
        },
        "memory": {
          "time": "2016-09-27T16:57:31Z",
          "usageBytes": 62529536,
          "workingSetBytes": 62349312,
          "rssBytes": 47509504,
          "pageFaults": 4769397409,
          "majorPageFaults": 13
        },
        "rootfs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "logs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "userDefinedMetrics": null
      },
      {
        "name": "bar",
        "startTime": "2016-08-25T18:46:52Z",
        "cpu": {
          "time": "2016-09-27T16:57:31Z",
          "usageNanoCores": 56652446,
          "usageCoreNanoSeconds": 101437561712262
        },
        "memory": {
          "time": "2016-09-27T16:57:31Z",
          "usageBytes": 62529536,
          "workingSetBytes": 62349312,
          "rssBytes": 47509504,
          "pageFaults": 4769397409,
          "majorPageFaults": 13
        },
        "rootfs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "logs": {
          "availableBytes": 84379979776,
          "capacityBytes": 105553100800
        },
        "userDefinedMetrics": null
      }
    ],
    "startTime": "2016-08-25T18:46:52Z",
    "cpu": {
      "time": "2016-09-27T16:57:41Z",
      "usageNanoCores": 576996212,
      "usageCoreNanoSeconds": 774129887054161
    },
    "memory": {
      "time": "2016-09-27T16:57:41Z",
      "availableBytes": 10726387712,
      "usageBytes": 12313182208,
      "workingSetBytes": 5081538560,
      "rssBytes": 35586048,
      "pageFaults": 351742,
      "majorPageFaults": 1236
    },
    "network": {
      "time": "2016-09-27T16:57:41Z",
      "rxBytes": 213281337459,
      "rxErrors": 0,
      "txBytes": 292869995684,
      "txErrors": 0
    },
    "fs": {
      "availableBytes": 84379979776,
      "capacityBytes": 105553100800,
      "usedBytes": 16754286592
    },
    "runtime": {
      "imageFs": {
        "availableBytes": 84379979776,
        "capacityBytes": 105553100800,
        "usedBytes": 5809371475
      }
    }
  },
  "pods": [
    {
      "podRef": {
        "name": "foopod",
        "namespace": "foons",
        "uid": "6d305b06-8419-11e6-825c-42010af000ae"
      },
      "startTime": "2016-09-26T18:45:42Z",
      "containers": [
        {
          "name": "foocontainer",
          "startTime": "2016-09-26T18:46:43Z",
          "cpu": {
            "time": "2016-09-27T16:57:32Z",
            "usageNanoCores": 846503,
            "usageCoreNanoSeconds": 56507553554
          },
          "memory": {
            "time": "2016-09-27T16:57:32Z",
            "usageBytes": 30789632,
            "workingSetBytes": 30789632,
"rssBytes": 30695424,
|
||||
"pageFaults": 10761,
|
||||
"majorPageFaults": 0
|
||||
},
|
||||
"rootfs": {
|
||||
"availableBytes": 84379979776,
|
||||
"capacityBytes": 105553100800,
|
||||
"usedBytes": 57344
|
||||
},
|
||||
"logs": {
|
||||
"availableBytes": 84379979776,
|
||||
"capacityBytes": 105553100800,
|
||||
"usedBytes": 24576
|
||||
},
|
||||
"userDefinedMetrics": null
|
||||
}
|
||||
],
|
||||
"network": {
|
||||
"time": "2016-09-27T16:57:34Z",
|
||||
"rxBytes": 70749124,
|
||||
"rxErrors": 0,
|
||||
"txBytes": 47813506,
|
||||
"txErrors": 0
|
||||
},
|
||||
"volume": [
|
||||
{
|
||||
"availableBytes": 7903948800,
|
||||
"capacityBytes": 7903961088,
|
||||
"usedBytes": 12288,
|
||||
"name": "volume1"
|
||||
},
|
||||
{
|
||||
"availableBytes": 7903956992,
|
||||
"capacityBytes": 7903961088,
|
||||
"usedBytes": 4096,
|
||||
"name": "volume2"
|
||||
},
|
||||
{
|
||||
"availableBytes": 7903948800,
|
||||
"capacityBytes": 7903961088,
|
||||
"usedBytes": 12288,
|
||||
"name": "volume3"
|
||||
},
|
||||
{
|
||||
"availableBytes": 7903952896,
|
||||
"capacityBytes": 7903961088,
|
||||
"usedBytes": 8192,
|
||||
"name": "volume4"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}`
|
||||
@@ -202,21 +202,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 		case INT:
 			iv, err := strconv.ParseInt(v, 10, 64)
 			if err != nil {
-				log.Printf("ERROR parsing %s to int: %s", v, err)
+				log.Printf("E! Error parsing %s to int: %s", v, err)
 			} else {
 				fields[k] = iv
 			}
 		case FLOAT:
 			fv, err := strconv.ParseFloat(v, 64)
 			if err != nil {
-				log.Printf("ERROR parsing %s to float: %s", v, err)
+				log.Printf("E! Error parsing %s to float: %s", v, err)
 			} else {
 				fields[k] = fv
 			}
 		case DURATION:
 			d, err := time.ParseDuration(v)
 			if err != nil {
-				log.Printf("ERROR parsing %s to duration: %s", v, err)
+				log.Printf("E! Error parsing %s to duration: %s", v, err)
 			} else {
 				fields[k] = int64(d)
 			}
@@ -227,14 +227,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 		case EPOCH:
 			iv, err := strconv.ParseInt(v, 10, 64)
 			if err != nil {
-				log.Printf("ERROR parsing %s to int: %s", v, err)
+				log.Printf("E! Error parsing %s to int: %s", v, err)
 			} else {
 				timestamp = time.Unix(iv, 0)
 			}
 		case EPOCH_NANO:
 			iv, err := strconv.ParseInt(v, 10, 64)
 			if err != nil {
-				log.Printf("ERROR parsing %s to int: %s", v, err)
+				log.Printf("E! Error parsing %s to int: %s", v, err)
 			} else {
 				timestamp = time.Unix(0, iv)
 			}
@@ -265,7 +265,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 		// if we still haven't found a timestamp layout, log it and we will
 		// just use time.Now()
 		if !foundTs {
-			log.Printf("ERROR parsing timestamp [%s], could not find any "+
+			log.Printf("E! Error parsing timestamp [%s], could not find any "+
 				"suitable time layouts.", v)
 		}
 	case DROP:
@@ -275,7 +275,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) {
 			if err == nil {
 				timestamp = ts
 			} else {
-				log.Printf("ERROR parsing %s to time layout [%s]: %s", v, t, err)
+				log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err)
 			}
 		}
 	}
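The pattern in these hunks repeats throughout the plugins below: bare `ERROR`-style prefixes are replaced by the single-letter level prefixes (`E!`, `W!`, `I!`, `D!`) that the 1.1 logging rework filters on. As a minimal sketch of how such a prefix can be recognized downstream — the `parseLevel` helper here is hypothetical, not part of Telegraf:

```
package main

import (
	"fmt"
	"strings"
)

// parseLevel is a hypothetical helper: it recovers the level prefix
// ("E!", "W!", "I!" or "D!") from a log line, defaulting to "I!".
func parseLevel(line string) (string, string) {
	for _, p := range []string{"E!", "W!", "I!", "D!"} {
		if strings.HasPrefix(line, p) {
			return p, strings.TrimSpace(strings.TrimPrefix(line, p))
		}
	}
	return "I!", line
}

func main() {
	level, msg := parseLevel("E! Error parsing 42x to int: invalid syntax")
	fmt.Printf("level=%s msg=%q\n", level, msg)
}
```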
@@ -152,6 +152,31 @@ func TestBuiltinCommonLogFormat(t *testing.T) {
 	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
 }
 
+// common log format
+// 127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
+func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{COMMON_LOG_FORMAT}"},
+	}
+	assert.NoError(t, p.Compile())
+
+	// Parse an influxdb POST request
+	m, err := p.ParseLine(`127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
+	require.NotNil(t, m)
+	assert.NoError(t, err)
+	assert.Equal(t,
+		map[string]interface{}{
+			"resp_bytes":   int64(2326),
+			"auth":         "frank1234",
+			"client_ip":    "127.0.0.1",
+			"http_version": float64(1.0),
+			"ident":        "user1234",
+			"request":      "/apache_pb.gif",
+		},
+		m.Fields())
+	assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
+}
+
 // combined log format
 // 127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"
 func TestBuiltinCombinedLogFormat(t *testing.T) {

@@ -53,7 +53,7 @@ RESPONSE_TIME %{DURATION:response_time_ns:duration}
 EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
 
 # Wider-ranging username matching vs. logstash built-in %{USER}
-NGUSERNAME [a-zA-Z\.\@\-\+_%]+
+NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
 NGUSER %{NGUSERNAME}
 # Wider-ranging client IP matching
 CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
@@ -64,7 +64,7 @@ CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
 
 # apache & nginx logs, this is also known as the "common log format"
 # see https://en.wikipedia.org/wiki/Common_Log_Format
-COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
+COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
 
 # Combined log format is the same as the common log format but with the addition
 # of two quoted strings at the end for "referrer" and "agent"

@@ -49,7 +49,7 @@ RESPONSE_TIME %{DURATION:response_time_ns:duration}
 EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME}
 
 # Wider-ranging username matching vs. logstash built-in %{USER}
-NGUSERNAME [a-zA-Z\.\@\-\+_%]+
+NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
 NGUSER %{NGUSERNAME}
 # Wider-ranging client IP matching
 CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
@@ -60,7 +60,7 @@ CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
 
 # apache & nginx logs, this is also known as the "common log format"
 # see https://en.wikipedia.org/wiki/Common_Log_Format
-COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
+COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-)
 
 # Combined log format is the same as the common log format but with the addition
 # of two quoted strings at the end for "referrer" and "agent"
@@ -134,7 +134,7 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
 	for _, filepath := range l.Files {
 		g, err := globpath.Compile(filepath)
 		if err != nil {
-			log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
+			log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
 			continue
 		}
 		files := g.Match()
@@ -167,7 +167,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) {
 	var line *tail.Line
 	for line = range tailer.Lines {
 		if line.Err != nil {
-			log.Printf("ERROR tailing file %s, Error: %s\n",
+			log.Printf("E! Error tailing file %s, Error: %s\n",
 				tailer.Filename, line.Err)
 			continue
 		}
@@ -216,7 +216,7 @@ func (l *LogParserPlugin) Stop() {
 	for _, t := range l.tailers {
 		err := t.Stop()
 		if err != nil {
-			log.Printf("ERROR stopping tail on file %s\n", t.Filename)
+			log.Printf("E! Error stopping tail on file %s\n", t.Filename)
 		}
 		t.Cleanup()
 	}

@@ -134,7 +134,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
 	req.URL.RawQuery = params.String()
 	req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin")
 	if api.Debug {
-		log.Printf("Request URL: %s", req.URL.String())
+		log.Printf("D! Request URL: %s", req.URL.String())
 	}
 
 	resp, err := client.Do(req)
@@ -148,7 +148,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) {
 		return nil, err
 	}
 	if api.Debug {
-		log.Printf("Response Body:%s", string(body))
+		log.Printf("D! Response Body:%s", string(body))
 	}
 
 	if err = chimpErrorCheck(body); err != nil {
@@ -35,13 +35,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
   #   "tasks",
   #   "messages",
   # ]
-  ## Include mesos tasks statistics, default is false
-  # slave_tasks = true
 ```
 
 By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default
-values. User needs to specify master/slave nodes this plugin will gather metrics from. Additionally, enabling `slave_tasks` will allow
-gathering metrics from tasks running on specified slaves (this option is disabled by default).
+values. User needs to specify master/slave nodes this plugin will gather metrics from.
 
 ### Measurements & Fields:
 
@@ -235,31 +232,6 @@ Mesos slave metric groups
 - slave/valid_framework_messages
 - slave/valid_status_updates
 
-Mesos tasks metric groups
-
-- executor_id
-- executor_name
-- framework_id
-- source
-- statistics
-    - cpus_limit
-    - cpus_system_time_secs
-    - cpus_user_time_secs
-    - mem_anon_bytes
-    - mem_cache_bytes
-    - mem_critical_pressure_counter
-    - mem_file_bytes
-    - mem_limit_bytes
-    - mem_low_pressure_counter
-    - mem_mapped_file_bytes
-    - mem_medium_pressure_counter
-    - mem_rss_bytes
-    - mem_swap_bytes
-    - mem_total_bytes
-    - mem_total_memsw_bytes
-    - mem_unevictable_bytes
-    - timestamp
-
 ### Tags:
 
 - All master/slave measurements have the following tags:
@@ -269,16 +241,11 @@ Mesos tasks metric groups
 - All master measurements have the extra tags:
     - state (leader/follower)
 
-- Tasks measurements have the following tags:
-    - server
-    - framework_id
-    - task_id
-
 ### Example Output:
 ```
 $ telegraf -config ~/mesos.conf -input-filter mesos -test
 * Plugin: mesos, Collection 1
 mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
 mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
 allocator/event_queue_dispatches=0,master/cpus_percent=0,
 master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
 master/cpus_revocable_used=0,master/cpus_total=2,
@@ -297,15 +264,3 @@ master/mem_used=0,master/messages_authenticate=0,
 master/messages_deactivate_framework=0 ...
 ```
 
-Meoso tasks metrics (if enabled):
-```
-mesos-tasks,host=172.17.8.102,server=172.17.8.101,framework_id=e3060235-c4ed-4765-9d36-784e3beca07f-0000,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2
-cpus_limit=0.2,cpus_system_time_secs=142.49,cpus_user_time_secs=388.14,
-mem_anon_bytes=359129088,mem_cache_bytes=3964928,
-mem_critical_pressure_counter=0,mem_file_bytes=3964928,
-mem_limit_bytes=767557632,mem_low_pressure_counter=0,
-mem_mapped_file_bytes=114688,mem_medium_pressure_counter=0,
-mem_rss_bytes=359129088,mem_swap_bytes=0,mem_total_bytes=363094016,
-mem_total_memsw_bytes=363094016,mem_unevictable_bytes=0,
-timestamp=1465486052.70525 1465486053052811792...
-```
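Since the README above now tells users to list master/slave nodes explicitly (task statistics having been pulled from this release, as the commented-out code below shows), a minimal working configuration would look roughly like this — the addresses and collection groups are illustrative, not defaults:

```
[[inputs.mesos]]
  ## Illustrative values; point these at your own cluster.
  timeout = 100
  masters = ["localhost:5050"]
  master_collections = ["resources", "master", "system", "messages"]
  slaves = ["localhost:5051"]
  slave_collections = ["resources", "tasks", "messages"]
```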
@@ -30,7 +30,7 @@ type Mesos struct {
 	MasterCols []string `toml:"master_collections"`
 	Slaves     []string
 	SlaveCols  []string `toml:"slave_collections"`
-	SlaveTasks bool
+	//SlaveTasks bool
 }
 
 var allMetrics = map[Role][]string{
@@ -66,8 +66,6 @@ var sampleConfig = `
   #   "tasks",
   #   "messages",
   # ]
-  ## Include mesos tasks statistics, default is false
-  # slave_tasks = true
 `
 
 // SampleConfig returns a sample configuration block
@@ -90,7 +88,7 @@ func (m *Mesos) SetDefaults() {
 	}
 
 	if m.Timeout == 0 {
-		log.Println("[mesos] Missing timeout value, setting default value (100ms)")
+		log.Println("I! [mesos] Missing timeout value, setting default value (100ms)")
 		m.Timeout = 100
 	}
 }
@@ -121,16 +119,16 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
 			return
 		}(v)
 
-		if !m.SlaveTasks {
-			continue
-		}
+		// if !m.SlaveTasks {
+		// 	continue
+		// }
 
-		wg.Add(1)
-		go func(c string) {
-			errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
-			wg.Done()
-			return
-		}(v)
+		// wg.Add(1)
+		// go func(c string) {
+		// 	errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
+		// 	wg.Done()
+		// 	return
+		// }(v)
 	}
 
 	wg.Wait()
@@ -385,7 +383,7 @@ func getMetrics(role Role, group string) []string {
 	ret, ok := m[group]
 
 	if !ok {
-		log.Printf("[mesos] Unkown %s metrics group: %s\n", role, group)
+		log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group)
 		return []string{}
 	}
 
@@ -459,7 +457,6 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
 	}
 
 	for _, task := range metrics {
-		tags["task_id"] = task.ExecutorID
 		tags["framework_id"] = task.FrameworkID
 
 		jf := jsonparser.JSONFlattener{}
@@ -468,7 +465,9 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
 		if err != nil {
 			return err
 		}
 
+		timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)
+		jf.Fields["executor_id"] = task.ExecutorID
 
 		acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
 	}
@@ -9,14 +9,14 @@ import (
 	"os"
 	"testing"
 
-	jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
 	"github.com/influxdata/telegraf/testutil"
 )
 
 var masterMetrics map[string]interface{}
 var masterTestServer *httptest.Server
 var slaveMetrics map[string]interface{}
-var slaveTaskMetrics map[string]interface{}
+
+// var slaveTaskMetrics map[string]interface{}
 var slaveTestServer *httptest.Server
 
 func randUUID() string {
@@ -216,31 +216,31 @@ func generateMetrics() {
 		slaveMetrics[k] = rand.Float64()
 	}
 
-	slaveTaskMetrics = map[string]interface{}{
-		"executor_id":   fmt.Sprintf("task_%s", randUUID()),
-		"executor_name": "Some task description",
-		"framework_id":  randUUID(),
-		"source":        fmt.Sprintf("task_source_%s", randUUID()),
-		"statistics": map[string]interface{}{
-			"cpus_limit":                    rand.Float64(),
-			"cpus_system_time_secs":         rand.Float64(),
-			"cpus_user_time_secs":           rand.Float64(),
-			"mem_anon_bytes":                float64(rand.Int63()),
-			"mem_cache_bytes":               float64(rand.Int63()),
-			"mem_critical_pressure_counter": float64(rand.Int63()),
-			"mem_file_bytes":                float64(rand.Int63()),
-			"mem_limit_bytes":               float64(rand.Int63()),
-			"mem_low_pressure_counter":      float64(rand.Int63()),
-			"mem_mapped_file_bytes":         float64(rand.Int63()),
-			"mem_medium_pressure_counter":   float64(rand.Int63()),
-			"mem_rss_bytes":                 float64(rand.Int63()),
-			"mem_swap_bytes":                float64(rand.Int63()),
-			"mem_total_bytes":               float64(rand.Int63()),
-			"mem_total_memsw_bytes":         float64(rand.Int63()),
-			"mem_unevictable_bytes":         float64(rand.Int63()),
-			"timestamp":                     rand.Float64(),
-		},
-	}
+	// slaveTaskMetrics = map[string]interface{}{
+	// 	"executor_id":   fmt.Sprintf("task_name.%s", randUUID()),
+	// 	"executor_name": "Some task description",
+	// 	"framework_id":  randUUID(),
+	// 	"source":        fmt.Sprintf("task_source.%s", randUUID()),
+	// 	"statistics": map[string]interface{}{
+	// 		"cpus_limit":                    rand.Float64(),
+	// 		"cpus_system_time_secs":         rand.Float64(),
+	// 		"cpus_user_time_secs":           rand.Float64(),
+	// 		"mem_anon_bytes":                float64(rand.Int63()),
+	// 		"mem_cache_bytes":               float64(rand.Int63()),
+	// 		"mem_critical_pressure_counter": float64(rand.Int63()),
+	// 		"mem_file_bytes":                float64(rand.Int63()),
+	// 		"mem_limit_bytes":               float64(rand.Int63()),
+	// 		"mem_low_pressure_counter":      float64(rand.Int63()),
+	// 		"mem_mapped_file_bytes":         float64(rand.Int63()),
+	// 		"mem_medium_pressure_counter":   float64(rand.Int63()),
+	// 		"mem_rss_bytes":                 float64(rand.Int63()),
+	// 		"mem_swap_bytes":                float64(rand.Int63()),
+	// 		"mem_total_bytes":               float64(rand.Int63()),
+	// 		"mem_total_memsw_bytes":         float64(rand.Int63()),
+	// 		"mem_unevictable_bytes":         float64(rand.Int63()),
+	// 		"timestamp":                     rand.Float64(),
+	// 	},
+	// }
 }
 
 func TestMain(m *testing.M) {
@@ -260,11 +260,11 @@ func TestMain(m *testing.M) {
 		w.Header().Set("Content-Type", "application/json")
 		json.NewEncoder(w).Encode(slaveMetrics)
 	})
-	slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(http.StatusOK)
-		w.Header().Set("Content-Type", "application/json")
-		json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics})
-	})
+	// slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) {
+	// 	w.WriteHeader(http.StatusOK)
+	// 	w.Header().Set("Content-Type", "application/json")
+	// 	json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics})
+	// })
 	slaveTestServer = httptest.NewServer(slaveRouter)
 
 	rc := m.Run()
@@ -324,10 +324,10 @@ func TestMesosSlave(t *testing.T) {
 	var acc testutil.Accumulator
 
 	m := Mesos{
-		Masters:    []string{},
-		Slaves:     []string{slaveTestServer.Listener.Addr().String()},
-		SlaveTasks: true,
-		Timeout:    10,
+		Masters: []string{},
+		Slaves:  []string{slaveTestServer.Listener.Addr().String()},
+		// SlaveTasks: true,
+		Timeout: 10,
 	}
 
 	err := m.Gather(&acc)
@@ -338,17 +338,17 @@ func TestMesosSlave(t *testing.T) {
 
 	acc.AssertContainsFields(t, "mesos", slaveMetrics)
 
-	jf := jsonparser.JSONFlattener{}
-	err = jf.FlattenJSON("", slaveTaskMetrics)
+	// expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1)
+	// for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) {
+	// 	expectedFields[k] = v
+	// }
+	// expectedFields["executor_id"] = slaveTaskMetrics["executor_id"]
 
-	if err != nil {
-		t.Errorf(err.Error())
-	}
-
-	acc.AssertContainsFields(
-		t,
-		"mesos_tasks",
-		slaveTaskMetrics["statistics"].(map[string]interface{}))
+	// acc.AssertContainsTaggedFields(
+	// 	t,
+	// 	"mesos_tasks",
+	// 	expectedFields,
+	// 	map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)})
 }
 
 func TestSlaveFilter(t *testing.T) {
@@ -47,7 +47,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
 		},
 	}, result_repl)
 	if err != nil {
-		log.Println("Not gathering replica set status, member not in replica set (" + err.Error() + ")")
+		log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
 	}
 
 	jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
@@ -62,7 +62,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
 		names := []string{}
 		names, err = s.Session.DatabaseNames()
 		if err != nil {
-			log.Println("Error getting database names (" + err.Error() + ")")
+			log.Println("E! Error getting database names (" + err.Error() + ")")
 		}
 		for _, db_name := range names {
 			db_stat_line := &DbStatsData{}
@@ -73,7 +73,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
 				},
 			}, db_stat_line)
 			if err != nil {
-				log.Println("Error getting db stats from " + db_name + "(" + err.Error() + ")")
+				log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")")
 			}
 			db := &Db{
 				Name: db_name,
@@ -133,7 +133,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error {
 	return nil
 }
 func (m *MQTTConsumer) onConnect(c mqtt.Client) {
-	log.Printf("MQTT Client Connected")
+	log.Printf("I! MQTT Client Connected")
 	if !m.PersistentSession || !m.started {
 		topics := make(map[string]byte)
 		for _, topic := range m.Topics {
@@ -142,7 +142,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
 	subscribeToken := c.SubscribeMultiple(topics, m.recvMessage)
 	subscribeToken.Wait()
 	if subscribeToken.Error() != nil {
-		log.Printf("MQTT SUBSCRIBE ERROR\ntopics: %s\nerror: %s",
+		log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s",
 			strings.Join(m.Topics[:], ","), subscribeToken.Error())
 	}
 	m.started = true
@@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) {
 }
 
 func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) {
-	log.Printf("MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
+	log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())
 	return
 }
 
@@ -166,7 +166,7 @@ func (m *MQTTConsumer) receiver() {
 		topic := msg.Topic()
 		metrics, err := m.parser.Parse(msg.Payload())
 		if err != nil {
-			log.Printf("MQTT PARSE ERROR\nmessage: %s\nerror: %s",
+			log.Printf("E! MQTT Parse Error\nmessage: %s\nerror: %s",
 				string(msg.Payload()), err.Error())
 		}
 
@@ -4,16 +4,16 @@ import (
 	"bytes"
 	"database/sql"
 	"fmt"
-	"net/url"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	_ "github.com/go-sql-driver/mysql"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal/errchan"
 	"github.com/influxdata/telegraf/plugins/inputs"
+
+	"github.com/go-sql-driver/mysql"
 )
 
 type Mysql struct {
@@ -69,13 +69,13 @@ var sampleConfig = `
   ## gather metrics from SHOW BINARY LOGS command output
   gather_binary_logs = false
   #
-  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_TABLE
+  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
   gather_table_io_waits = false
   #
   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
   gather_table_lock_waits = false
   #
-  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMART_BY_INDEX_USAGE
+  ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
   gather_index_io_waits = false
   #
   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
@@ -313,6 +313,10 @@ var mappings = []*mapping{
 		onServer: "wsrep_",
 		inExport: "wsrep_",
 	},
+	{
+		onServer: "Uptime_",
+		inExport: "uptime_",
+	},
 }
 
 var (
@@ -394,27 +398,6 @@ var (
 )
 
-func dsnAddTimeout(dsn string) (string, error) {
-
-	// DSN "?timeout=5s" is not valid, but "/?timeout=5s" is valid ("" and "/"
-	// are the same DSN)
-	if dsn == "" {
-		dsn = "/"
-	}
-	u, err := url.Parse(dsn)
-	if err != nil {
-		return "", err
-	}
-	v := u.Query()
-
-	// Only override timeout if not already defined
-	if _, ok := v["timeout"]; ok == false {
-		v.Add("timeout", defaultTimeout.String())
-		u.RawQuery = v.Encode()
-	}
-	return u.String(), nil
-}
-
 // Math constants
 const (
 	picoSeconds = 1e12
@@ -678,10 +661,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu
 	var val sql.RawBytes
 
 	// parse DSN and save server tag
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{"server": servtag}
 	fields := make(map[string]interface{})
 	for rows.Next() {
@@ -718,10 +698,7 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu
 	}
 	defer rows.Close()
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 
 	tags := map[string]string{"server": servtag}
 	fields := make(map[string]interface{})
@@ -766,11 +743,7 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat
 	defer rows.Close()
 
 	// parse DSN and save host as a tag
-	var servtag string
-	servtag, err = parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{"server": servtag}
 	var (
 		size uint64 = 0
@@ -813,11 +786,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
 	}
 
 	// parse the DSN and save host name as a tag
-	var servtag string
-	servtag, err = parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{"server": servtag}
 	fields := make(map[string]interface{})
 	for rows.Next() {
@@ -928,10 +897,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf.
 
 	var servtag string
 	fields := make(map[string]interface{})
-	servtag, err = parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag = getDSNTag(serv)
 
 	// mapping of state with its counts
 	stateCounts := make(map[string]uint32, len(generalThreadStates))
@@ -974,10 +940,7 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Acc
 		timeFetch, timeInsert, timeUpdate, timeDelete float64
 	)
 
-	servtag, err = parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag = getDSNTag(serv)
 
 	for rows.Next() {
 		err = rows.Scan(&objSchema, &objName,
@@ -1026,10 +989,7 @@ func (m *Mysql) gatherPerfIndexIOWaits(db *sql.DB, serv string, acc telegraf.Acc
 		timeFetch, timeInsert, timeUpdate, timeDelete float64
 	)
 
-	servtag, err = parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag = getDSNTag(serv)
 
 	for rows.Next() {
 		err = rows.Scan(&objSchema, &objName, &indexName,
@@ -1081,10 +1041,7 @@ func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc tel
 		incValue, maxInt uint64
 	)
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 
 	for rows.Next() {
 		if err := rows.Scan(&schema, &table, &column, &incValue, &maxInt); err != nil {
@@ -1128,10 +1085,7 @@ func (m *Mysql) gatherPerfTableLockWaits(db *sql.DB, serv string, acc telegraf.A
 	}
 	defer rows.Close()
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 
 	var (
 		objectSchema string
@@ -1253,10 +1207,7 @@ func (m *Mysql) gatherPerfEventWaits(db *sql.DB, serv string, acc telegraf.Accum
 		starCount, timeWait float64
 	)
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{
 		"server": servtag,
 	}
@@ -1291,10 +1242,7 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr
 		sumNumBytesRead, sumNumBytesWrite float64
 	)
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{
 		"server": servtag,
 	}
@@ -1361,10 +1309,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
 		noIndexUsed float64
 	)
 
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	servtag := getDSNTag(serv)
 	tags := map[string]string{
 		"server": servtag,
 	}
@@ -1408,14 +1353,8 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
 
 // gatherTableSchema can be used to gather stats on each schema
 func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumulator) error {
-	var (
-		dbList  []string
-		servtag string
-	)
-	servtag, err := parseDSN(serv)
-	if err != nil {
-		servtag = "localhost"
-	}
+	var dbList []string
+	servtag := getDSNTag(serv)
 
 	// if the list of databases if empty, then get all databases
 	if len(m.TableSchemaDatabases) == 0 {
@@ -1571,6 +1510,27 @@ func copyTags(in map[string]string) map[string]string {
 	return out
 }
 
+func dsnAddTimeout(dsn string) (string, error) {
+	conf, err := mysql.ParseDSN(dsn)
+	if err != nil {
+		return "", err
+	}
+
+	if conf.Timeout == 0 {
+		conf.Timeout = time.Second * 5
+	}
+
+	return conf.FormatDSN(), nil
+}
+
+func getDSNTag(dsn string) string {
+	conf, err := mysql.ParseDSN(dsn)
+	if err != nil {
+		return "127.0.0.1:3306"
+	}
+	return conf.Addr
+}
+
 func init() {
 	inputs.Add("mysql", func() telegraf.Input {
 		return &Mysql{}
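The `dsnAddTimeout`/`getDSNTag` helpers added above delegate DSN handling to the driver's own `mysql.ParseDSN`, replacing the hand-rolled parser deleted further down. A self-contained sketch of how they behave — the input DSN is an example, and the expected outputs match the updated tests:

```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

// Standalone copies of the helpers introduced in the diff above.
func dsnAddTimeout(dsn string) (string, error) {
	conf, err := mysql.ParseDSN(dsn)
	if err != nil {
		return "", err
	}
	if conf.Timeout == 0 {
		conf.Timeout = time.Second * 5 // default connect timeout
	}
	return conf.FormatDSN(), nil
}

func getDSNTag(dsn string) string {
	conf, err := mysql.ParseDSN(dsn)
	if err != nil {
		return "127.0.0.1:3306"
	}
	return conf.Addr
}

func main() {
	dsn, err := dsnAddTimeout("root:passwd@tcp(192.168.1.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dsn)            // root:passwd@tcp(192.168.1.1:3306)/?timeout=5s
	fmt.Println(getDSNTag(dsn)) // 192.168.1.1:3306
}
```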
@@ -26,7 +26,7 @@ func TestMysqlDefaultsToLocal(t *testing.T) {
 	assert.True(t, acc.HasMeasurement("mysql"))
 }
 
-func TestMysqlParseDSN(t *testing.T) {
+func TestMysqlGetDSNTag(t *testing.T) {
 	tests := []struct {
 		input  string
 		output string
@@ -78,9 +78,9 @@ func TestMysqlParseDSN(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		output, _ := parseDSN(test.input)
+		output := getDSNTag(test.input)
 		if output != test.output {
-			t.Errorf("Expected %s, got %s\n", test.output, output)
+			t.Errorf("Input: %s Expected %s, got %s\n", test.input, test.output, output)
 		}
 	}
 }
@@ -92,7 +92,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) {
 	}{
 		{
 			"",
-			"/?timeout=5s",
+			"tcp(127.0.0.1:3306)/?timeout=5s",
 		},
 		{
 			"tcp(192.168.1.1:3306)/",
@@ -104,7 +104,19 @@ func TestMysqlDNSAddTimeout(t *testing.T) {
 		},
 		{
 			"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
-			"root:passwd@tcp(192.168.1.1:3306)/?tls=false&timeout=10s",
+			"root:passwd@tcp(192.168.1.1:3306)/?timeout=10s&tls=false",
 		},
+		{
+			"tcp(10.150.1.123:3306)/",
+			"tcp(10.150.1.123:3306)/?timeout=5s",
+		},
+		{
+			"root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/",
+			"root:@!~(*&$#%(&@#(@&#Password@tcp(10.150.1.123:3306)/?timeout=5s",
+		},
+		{
+			"root:Test3a#@!@tcp(10.150.1.123:3306)/",
+			"root:Test3a#@!@tcp(10.150.1.123:3306)/?timeout=5s",
+		},
 	}
 
@@ -1,85 +0,0 @@
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
-	"errors"
-	"strings"
-)
-
-// parseDSN parses the DSN string to a config
-func parseDSN(dsn string) (string, error) {
-	//var user, passwd string
-	var addr, net string
-
-	// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
-	// Find the last '/' (since the password or the net addr might contain a '/')
-	for i := len(dsn) - 1; i >= 0; i-- {
-		if dsn[i] == '/' {
-			var j, k int
-
-			// left part is empty if i <= 0
-			if i > 0 {
-				// [username[:password]@][protocol[(address)]]
-				// Find the last '@' in dsn[:i]
-				for j = i; j >= 0; j-- {
-					if dsn[j] == '@' {
-						// username[:password]
-						// Find the first ':' in dsn[:j]
-						for k = 0; k < j; k++ {
-							if dsn[k] == ':' {
-								//passwd = dsn[k+1 : j]
-								break
-							}
-						}
-						//user = dsn[:k]
-
-						break
-					}
-				}
-
-				// [protocol[(address)]]
-				// Find the first '(' in dsn[j+1:i]
-				for k = j + 1; k < i; k++ {
-					if dsn[k] == '(' {
-						// dsn[i-1] must be == ')' if an address is specified
-						if dsn[i-1] != ')' {
-							if strings.ContainsRune(dsn[k+1:i], ')') {
-								return "", errors.New("Invalid DSN unescaped")
-							}
-							return "", errors.New("Invalid DSN Addr")
-						}
-						addr = dsn[k+1 : i-1]
-						break
-					}
-				}
-				net = dsn[j+1 : k]
-			}
-
-			break
-		}
-	}
-
-	// Set default network if empty
-	if net == "" {
-		net = "tcp"
-	}
-
-	// Set default address if empty
-	if addr == "" {
-		switch net {
-		case "tcp":
-			addr = "127.0.0.1:3306"
-		case "unix":
-			addr = "/tmp/mysql.sock"
-		default:
-			return "", errors.New("Default addr for network '" + net + "' unknown")
-		}
-	}
-
-	return addr, nil
-}
@@ -28,12 +28,17 @@ type natsConsumer struct {
 	Servers []string
 	Secure  bool
 
+	// Client pending limits:
+	PendingMessageLimit int
+	PendingBytesLimit   int
+
 	// Legacy metric buffer support
 	MetricBuffer int
 
 	parser parsers.Parser
 
 	sync.Mutex
+	wg   sync.WaitGroup
 	Conn *nats.Conn
 	Subs []*nats.Subscription
@@ -47,13 +52,18 @@
 
 var sampleConfig = `
   ## urls of NATS servers
-  servers = ["nats://localhost:4222"]
+  # servers = ["nats://localhost:4222"]
  ## Use Transport Layer Security
-  secure = false
+  # secure = false
  ## subject(s) to consume
-  subjects = ["telegraf"]
+  # subjects = ["telegraf"]
  ## name a queue group
-  queue_group = "telegraf_consumers"
+  # queue_group = "telegraf_consumers"
+
+  ## Sets the limits for pending msgs and bytes for each subscription
+  ## These shouldn't need to be adjusted except in very high throughput scenarios
+  # pending_message_limit = 65536
+  # pending_bytes_limit = 67108864
 
   ## Data format to consume.
   ## Each data format has it's own unique set of configuration options, read
@@ -91,8 +101,15 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
 
 	var connectErr error
 
 	// set default NATS connection options
 	opts := nats.DefaultOptions
 
+	// override max reconnection tries
+	opts.MaxReconnect = -1
+
+	// override servers if any were specified
 	opts.Servers = n.Servers
 
 	opts.Secure = n.Secure
 
 	if n.Conn == nil || n.Conn.IsClosed() {
@@ -105,12 +122,22 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
 		n.errs = make(chan error)
 		n.Conn.SetErrorHandler(n.natsErrHandler)
 
-		n.in = make(chan *nats.Msg)
+		n.in = make(chan *nats.Msg, 1000)
 		for _, subj := range n.Subjects {
-			sub, err := n.Conn.ChanQueueSubscribe(subj, n.QueueGroup, n.in)
+			sub, err := n.Conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) {
+				n.in <- m
+			})
 			if err != nil {
 				return err
 			}
+			// ensure that the subscription has been processed by the server
+			if err = n.Conn.Flush(); err != nil {
+				return err
+			}
+			// set the subscription pending limits
+			if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil {
+				return err
+			}
 			n.Subs = append(n.Subs, sub)
 		}
 	}
@@ -118,8 +145,9 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
 	n.done = make(chan struct{})
 
 	// Start the message reader
+	n.wg.Add(1)
 	go n.receiver()
-	log.Printf("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
+	log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n",
 		n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup)
 
 	return nil
@@ -128,36 +156,30 @@ func (n *natsConsumer) Start(acc telegraf.Accumulator) error {
 // receiver() reads all incoming messages from NATS, and parses them into
 // telegraf metrics.
 func (n *natsConsumer) receiver() {
-	defer n.clean()
+	defer n.wg.Done()
 	for {
 		select {
 		case <-n.done:
 			return
 		case err := <-n.errs:
-			log.Printf("error reading from %s\n", err.Error())
+			log.Printf("E! error reading from %s\n", err.Error())
 		case msg := <-n.in:
 			metrics, err := n.parser.Parse(msg.Data)
 			if err != nil {
-				log.Printf("subject: %s, error: %s", msg.Subject, err.Error())
+				log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error())
 			}
 
 			for _, metric := range metrics {
 				n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time())
 			}
 
 		}
 	}
 }
 
 func (n *natsConsumer) clean() {
-	n.Lock()
-	defer n.Unlock()
 	close(n.in)
 	close(n.errs)
 
 	for _, sub := range n.Subs {
 		if err := sub.Unsubscribe(); err != nil {
-			log.Printf("Error unsubscribing from subject %s in queue %s: %s\n",
+			log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n",
 				sub.Subject, sub.Queue, err.Error())
 		}
 	}
@@ -170,6 +192,8 @@ func (n *natsConsumer) clean() {
 func (n *natsConsumer) Stop() {
 	n.Lock()
 	close(n.done)
+	n.wg.Wait()
+	n.clean()
 	n.Unlock()
 }
 
@@ -179,6 +203,13 @@ func (n *natsConsumer) Gather(acc telegraf.Accumulator) error {
 
 func init() {
 	inputs.Add("nats_consumer", func() telegraf.Input {
-		return &natsConsumer{}
+		return &natsConsumer{
+			Servers:             []string{"nats://localhost:4222"},
+			Secure:              false,
+			Subjects:            []string{"telegraf"},
+			QueueGroup:          "telegraf_consumers",
+			PendingBytesLimit:   nats.DefaultSubPendingBytesLimit,
+			PendingMessageLimit: nats.DefaultSubPendingMsgsLimit,
+		}
 	})
 }
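Taken together, these hunks replace `ChanQueueSubscribe` with a callback-based `QueueSubscribe` feeding a buffered channel, flush the connection so the server has acknowledged the subscription, and expose per-subscription pending limits. A condensed sketch of that pattern outside Telegraf — it assumes a NATS server on the default localhost:4222:

```
package main

import (
	"log"

	"github.com/nats-io/nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Buffered channel so a slow consumer does not block the client callback.
	in := make(chan *nats.Msg, 1000)
	sub, err := nc.QueueSubscribe("telegraf", "telegraf_consumers", func(m *nats.Msg) {
		in <- m
	})
	if err != nil {
		log.Fatal(err)
	}
	// Ensure the server has processed the subscription before continuing.
	if err := nc.Flush(); err != nil {
		log.Fatal(err)
	}
	// Raise the per-subscription pending limits for high-throughput setups.
	if err := sub.SetPendingLimits(nats.DefaultSubPendingMsgsLimit, nats.DefaultSubPendingBytesLimit); err != nil {
		log.Fatal(err)
	}

	msg := <-in
	log.Printf("received %q on subject %s", msg.Data, msg.Subject)
}
```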
@@ -39,6 +39,7 @@ func TestRunParser(t *testing.T) {
 	defer close(n.done)
 
 	n.parser, _ = parsers.NewInfluxParser()
+	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsg)
 	time.Sleep(time.Millisecond * 25)
@@ -56,6 +57,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
 	defer close(n.done)
 
 	n.parser, _ = parsers.NewInfluxParser()
+	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(invalidMsg)
 	time.Sleep(time.Millisecond * 25)
@@ -73,6 +75,7 @@ func TestRunParserAndGather(t *testing.T) {
 	defer close(n.done)
 
 	n.parser, _ = parsers.NewInfluxParser()
+	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsg)
 	time.Sleep(time.Millisecond * 25)
@@ -91,6 +94,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) {
 	defer close(n.done)
 
 	n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil)
+	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsgGraphite)
 	time.Sleep(time.Millisecond * 25)
@@ -109,6 +113,7 @@ func TestRunParserAndGatherJSON(t *testing.T) {
 	defer close(n.done)
 
 	n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil)
+	n.wg.Add(1)
 	go n.receiver()
 	in <- natsMsg(testMsgJSON)
 	time.Sleep(time.Millisecond * 25)
@@ -62,7 +62,7 @@ func (n *NSQConsumer) Start(acc telegraf.Accumulator) error {
 	n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error {
 		metrics, err := n.parser.Parse(message.Body)
 		if err != nil {
-			log.Printf("NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
+			log.Printf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())
 			return nil
 		}
 		for _, metric := range metrics {
@@ -132,7 +132,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "h"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in an hour
@@ -141,7 +141,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "d"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in a day
@@ -150,7 +150,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 			case strings.HasSuffix(when, "m"):
 				m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m"))
 				if err != nil {
-					log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+					log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 					continue
 				}
 				// seconds in a day
@@ -161,7 +161,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 
 			m, err := strconv.Atoi(fields[index])
 			if err != nil {
-				log.Printf("ERROR ntpq: parsing int: %s", fields[index])
+				log.Printf("E! Error ntpq: parsing int: %s", fields[index])
 				continue
 			}
 			mFields[key] = int64(m)
@@ -178,7 +178,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error {
 
 			m, err := strconv.ParseFloat(fields[index], 64)
 			if err != nil {
-				log.Printf("ERROR ntpq: parsing float: %s", fields[index])
+				log.Printf("E! Error ntpq: parsing float: %s", fields[index])
 				continue
 			}
 			mFields[key] = m

@@ -122,6 +122,9 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
 		fcgiIp := socketAddr[0]
 		fcgiPort, _ := strconv.Atoi(socketAddr[1])
 		fcgi, err = newFcgiClient(fcgiIp, fcgiPort)
+		if err != nil {
+			return err
+		}
 		if len(u.Path) > 1 {
 			statusPath = strings.Trim(u.Path, "/")
 		} else {
@@ -52,13 +52,13 @@ const sampleConfig = `
   ## urls to ping
   urls = ["www.google.com"] # required
   ## number of pings to send per collection (ping -c <COUNT>)
-  count = 1 # required
+  # count = 1
   ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
-  ping_interval = 0.0
+  # ping_interval = 1.0
   ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
-  timeout = 1.0
+  # timeout = 1.0
   ## interface to send ping from (ping -I <INTERFACE>)
-  interface = ""
+  # interface = ""
 `
 
 func (_ *Ping) SampleConfig() string {
@@ -200,6 +200,11 @@ func processPingOutput(out string) (int, int, float64, error) {
 
 func init() {
 	inputs.Add("ping", func() telegraf.Input {
-		return &Ping{pingHost: hostPinger}
+		return &Ping{
+			pingHost:     hostPinger,
+			PingInterval: 1.0,
+			Count:        1,
+			Timeout:      1.0,
+		}
 	})
 }
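The effect of this pair of hunks is that only `urls` remains mandatory: the defaults now live in `init()`, so the sample config can show every other option commented out. An illustrative user configuration after this change:

```
[[inputs.ping]]
  urls = ["www.google.com"] # required
  ## The values below match the new built-in defaults;
  ## uncomment only to override them.
  # count = 1
  # ping_interval = 1.0
  # timeout = 1.0
  # interface = ""
```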
@@ -29,6 +29,7 @@ type Postgresql struct {
 		Tagvalue    string
 		Measurement string
 	}
+	Debug bool
 }
 
 type query []struct {
@@ -269,9 +270,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
 	fields := make(map[string]interface{})
 COLUMN:
 	for col, val := range columnMap {
-		if acc.Debug() {
-			log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
-		}
+		log.Printf("D! postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
 		_, ignore := ignoredColumns[col]
 		if ignore || *val == nil {
 			continue
@@ -4,6 +4,7 @@ import (
 	"bufio"
 	"fmt"
 	"io"
+	"log"
 	"net"
 	"strconv"
 	"strings"
@@ -86,10 +87,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error
 	metrics := string(buf)
 
 	// Process data
-	fields, err := parseResponse(metrics)
-	if err != nil {
-		return err
-	}
+	fields := parseResponse(metrics)
 
 	// Add server socket as a tag
 	tags := map[string]string{"server": address}
@@ -99,22 +97,27 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error
 	return nil
 }
 
-func parseResponse(metrics string) (map[string]interface{}, error) {
+func parseResponse(metrics string) map[string]interface{} {
 	values := make(map[string]interface{})
 
 	s := strings.Split(metrics, ",")
 
 	for _, metric := range s[:len(s)-1] {
 		m := strings.Split(metric, "=")
+		if len(m) < 2 {
+			continue
+		}
 
 		i, err := strconv.ParseInt(m[1], 10, 64)
 		if err != nil {
-			return values, err
+			log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s",
+				metric, err)
+			continue
 		}
 		values[m[0]] = i
 	}
 
-	return values, nil
+	return values
 }
 
 func init() {
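The net effect is that `parseResponse` now tolerates malformed pairs and integers that overflow int64: it logs at `E!` and skips the entry rather than aborting the whole response. A standalone, lightly condensed copy demonstrating that behavior:

```
package main

import (
	"fmt"
	"log"
	"strconv"
	"strings"
)

// parseResponse mirrors the tolerant parser above: bad entries are
// logged and skipped instead of failing the whole response.
func parseResponse(metrics string) map[string]interface{} {
	values := make(map[string]interface{})
	for _, metric := range strings.Split(metrics, ",") {
		m := strings.Split(metric, "=")
		if len(m) < 2 {
			continue // e.g. "corrupt-packets--0" has no "="
		}
		i, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s", metric, err)
			continue // e.g. a counter larger than MaxInt64
		}
		values[m[0]] = i
	}
	return values
}

func main() {
	// "latency" parses; the other two entries are skipped.
	fmt.Println(parseResponse("corrupt-packets--0,latency=26,uptime=18446744073709550195,"))
}
```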
@@ -25,6 +25,30 @@ var metrics = "corrupt-packets=0,deferred-cache-inserts=0,deferred-cache-lookup=
|
||||
"key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," +
|
||||
"signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167,"
|
||||
|
||||
// first metric has no "="
|
||||
var corruptMetrics = "corrupt-packets--0,deferred-cache-inserts=0,deferred-cache-lookup=0," +
|
||||
"dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," +
|
||||
"dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," +
|
||||
"query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," +
|
||||
"recursing-questions=0,recursion-unanswered=0,security-status=3," +
|
||||
"servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," +
|
||||
"timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," +
|
||||
"udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," +
|
||||
"key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," +
|
||||
"signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167,"
|
||||
|
||||
// integer overflow
|
||||
var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-inserts=0,deferred-cache-lookup=0," +
|
||||
"dnsupdate-answers=0,dnsupdate-changes=0,dnsupdate-queries=0," +
|
||||
"dnsupdate-refused=0,packetcache-hit=0,packetcache-miss=1,packetcache-size=0," +
|
||||
"query-cache-hit=0,query-cache-miss=6,rd-queries=1,recursing-answers=0," +
|
||||
"recursing-questions=0,recursion-unanswered=0,security-status=3," +
|
||||
"servfail-packets=0,signatures=0,tcp-answers=0,tcp-queries=0," +
|
||||
"timedout-packets=0,udp-answers=1,udp-answers-bytes=50,udp-do-queries=0," +
|
||||
"udp-queries=0,udp4-answers=1,udp4-queries=1,udp6-answers=0,udp6-queries=0," +
|
||||
"key-cache-size=0,latency=26,meta-cache-size=0,qsize-q=0," +
|
||||
"signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167,"
|
||||
|
||||
func (s statServer) serverSocket(l net.Listener) {
|
||||
|
||||
for {
|
||||
@@ -86,8 +110,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPowerdnsParseMetrics(t *testing.T) {
|
||||
values, err := parseResponse(metrics)
|
||||
require.NoError(t, err, "Error parsing memcached response")
|
||||
values := parseResponse(metrics)
|
||||
|
||||
tests := []struct {
|
||||
key string
|
||||
@@ -145,3 +168,121 @@ func TestPowerdnsParseMetrics(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPowerdnsParseCorruptMetrics(t *testing.T) {
|
||||
values := parseResponse(corruptMetrics)
|
||||
|
||||
tests := []struct {
|
||||
key string
|
||||
value int64
|
||||
}{
|
||||
{"deferred-cache-inserts", 0},
|
||||
{"deferred-cache-lookup", 0},
|
||||
{"dnsupdate-answers", 0},
|
||||
{"dnsupdate-changes", 0},
|
||||
{"dnsupdate-queries", 0},
|
||||
{"dnsupdate-refused", 0},
|
||||
{"packetcache-hit", 0},
|
||||
{"packetcache-miss", 1},
|
||||
{"packetcache-size", 0},
|
||||
{"query-cache-hit", 0},
|
||||
{"query-cache-miss", 6},
|
||||
{"rd-queries", 1},
|
||||
{"recursing-answers", 0},
|
||||
{"recursing-questions", 0},
|
||||
{"recursion-unanswered", 0},
|
||||
{"security-status", 3},
|
||||
{"servfail-packets", 0},
|
||||
{"signatures", 0},
|
||||
{"tcp-answers", 0},
|
||||
{"tcp-queries", 0},
|
||||
{"timedout-packets", 0},
|
||||
{"udp-answers", 1},
|
||||
{"udp-answers-bytes", 50},
|
||||
{"udp-do-queries", 0},
|
||||
{"udp-queries", 0},
|
||||
{"udp4-answers", 1},
|
||||
{"udp4-queries", 1},
|
||||
{"udp6-answers", 0},
|
||||
{"udp6-queries", 0},
|
||||
{"key-cache-size", 0},
|
||||
{"latency", 26},
|
||||
{"meta-cache-size", 0},
|
||||
{"qsize-q", 0},
|
||||
{"signature-cache-size", 0},
|
||||
{"sys-msec", 2889},
|
||||
{"uptime", 86317},
|
||||
{"user-msec", 2167},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
value, ok := values[test.key]
|
||||
if !ok {
|
||||
t.Errorf("Did not find key for metric %s in values", test.key)
|
||||
continue
|
||||
}
|
||||
if value != test.value {
|
||||
t.Errorf("Metric: %s, Expected: %d, actual: %d",
|
||||
test.key, test.value, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPowerdnsParseIntOverflowMetrics(t *testing.T) {
|
||||
values := parseResponse(intOverflowMetrics)
|
||||
|
||||
tests := []struct {
|
||||
key string
|
||||
value int64
|
||||
}{
|
||||
{"deferred-cache-inserts", 0},
|
||||
{"deferred-cache-lookup", 0},
|
||||
{"dnsupdate-answers", 0},
|
||||
{"dnsupdate-changes", 0},
|
||||
{"dnsupdate-queries", 0},
|
||||
{"dnsupdate-refused", 0},
|
||||
{"packetcache-hit", 0},
|
||||
{"packetcache-miss", 1},
|
||||
{"packetcache-size", 0},
|
||||
{"query-cache-hit", 0},
|
||||
{"query-cache-miss", 6},
|
||||
{"rd-queries", 1},
|
||||
{"recursing-answers", 0},
|
||||
{"recursing-questions", 0},
|
||||
{"recursion-unanswered", 0},
|
||||
{"security-status", 3},
|
||||
{"servfail-packets", 0},
|
||||
{"signatures", 0},
|
||||
{"tcp-answers", 0},
|
||||
{"tcp-queries", 0},
|
||||
{"timedout-packets", 0},
|
||||
{"udp-answers", 1},
|
||||
{"udp-answers-bytes", 50},
|
||||
{"udp-do-queries", 0},
|
||||
{"udp-queries", 0},
|
||||
{"udp4-answers", 1},
|
||||
{"udp4-queries", 1},
|
||||
{"udp6-answers", 0},
|
||||
{"udp6-queries", 0},
|
||||
{"key-cache-size", 0},
|
||||
{"latency", 26},
|
||||
{"meta-cache-size", 0},
|
||||
{"qsize-q", 0},
|
||||
{"signature-cache-size", 0},
|
||||
{"sys-msec", 2889},
|
||||
{"uptime", 86317},
|
||||
{"user-msec", 2167},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
value, ok := values[test.key]
|
||||
if !ok {
|
||||
t.Errorf("Did not find key for metric %s in values", test.key)
|
||||
continue
|
||||
}
|
||||
if value != test.value {
|
||||
t.Errorf("Metric: %s, Expected: %d, actual: %d",
|
||||
test.key, test.value, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
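The three tests above all call `parseResponse` with no error return, including over corrupt and integer-overflowing input, and expect the bad entries to simply be absent from the result. A minimal sketch of parsing logic consistent with that behaviour (inferred from the tests; not the plugin's actual code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseResponse splits a PowerDNS control-socket payload of
// comma-separated key=value pairs into a map, silently skipping any
// pair whose value does not parse as an int64 (corrupt or overflowing
// entries), which is why the corrupt/overflow tests expect no error.
func parseResponse(metrics string) map[string]interface{} {
	values := make(map[string]interface{})
	for _, metric := range strings.Split(metrics, ",") {
		m := strings.SplitN(metric, "=", 2)
		if len(m) < 2 {
			continue
		}
		i, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			continue // corrupt or overflowing value: skip it
		}
		values[m[0]] = i
	}
	return values
}

func main() {
	fmt.Println(parseResponse("udp-answers=1,latency=26,bad-value=###,"))
}
```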
@@ -10,7 +10,7 @@ The plugin will tag processes by their PID and their process name.

Processes can be specified either by pid file, by executable name, by command
line pattern matching, or by username (in this order of priority). The Procstat
plugin will use `pgrep` when an executable name is provided to obtain the pid.
Proctstas plugin will transmit IO, memory, cpu, file descriptor related
Procstat plugin will transmit IO, memory, cpu, file descriptor related
measurements for every process specified. A prefix can be set to isolate
individual process specific measurements.
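As a hedged illustration of the `pgrep` lookup described above (a minimal sketch assuming `pgrep` prints whitespace-separated PIDs; `pidsFromExe` is a hypothetical helper, not the plugin's actual code):

```go
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// pidsFromExe resolves an executable name to a list of PIDs by
// shelling out to pgrep, mirroring the lookup the README describes.
func pidsFromExe(exe string) ([]int32, error) {
	out, err := exec.Command("pgrep", exe).Output()
	if err != nil {
		return nil, fmt.Errorf("running pgrep for %q: %v", exe, err)
	}
	var pids []int32
	for _, field := range strings.Fields(string(out)) {
		pid, err := strconv.ParseInt(field, 10, 32)
		if err != nil {
			return nil, err
		}
		pids = append(pids, int32(pid))
	}
	return pids, nil
}

func main() {
	pids, err := pidsFromExe("telegraf")
	fmt.Println(pids, err)
}
```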
@@ -66,7 +66,7 @@ func (_ *Procstat) Description() string {

func (p *Procstat) Gather(acc telegraf.Accumulator) error {
	err := p.createProcesses()
	if err != nil {
		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
	} else {
		for pid, proc := range p.pidmap {
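For context: Telegraf routes log lines by a leading level marker, which is why this change prepends `E!` to the error message. A minimal sketch of that prefix-based convention (illustrative only; `logLevelOf` is a hypothetical helper, not Telegraf's internal logging API):

```go
package main

import (
	"fmt"
	"strings"
)

// logLevelOf extracts the level marker carried as a prefix on lines
// such as "E! Error: procstat getting process ...".
func logLevelOf(line string) string {
	for _, lvl := range []string{"E!", "W!", "I!", "D!"} {
		if strings.HasPrefix(line, lvl+" ") {
			return lvl
		}
	}
	return "I!" // treat unprefixed lines as informational
}

func main() {
	fmt.Println(logLevelOf("E! Error: procstat getting process")) // E!
}
```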
@@ -107,7 +107,8 @@ type Queue struct {
	Node       string
	Vhost      string
	Durable    bool
	AutoDelete bool   `json:"auto_delete"`
	IdleSince  string `json:"idle_since"`
}

// Node ...
@@ -328,6 +329,7 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator, errChan chan error) {
		// common information
		"consumers":            queue.Consumers,
		"consumer_utilisation": queue.ConsumerUtilisation,
		"idle_since":           queue.IdleSince,
		"memory":               queue.Memory,
		// messages information
		"message_bytes": queue.MessageBytes,
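A self-contained sketch of how the `json` tags above map snake_case fields onto the struct (the payload here is assumed for illustration, not a verbatim RabbitMQ management API response):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Queue carries only the fields relevant to this illustration.
type Queue struct {
	Durable    bool
	AutoDelete bool   `json:"auto_delete"`
	IdleSince  string `json:"idle_since"`
}

func main() {
	payload := []byte(`{"durable": true, "auto_delete": false, "idle_since": "2016-10-01 12:00:00"}`)
	var q Queue
	if err := json.Unmarshal(payload, &q); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", q) // {Durable:true AutoDelete:false IdleSince:2016-10-01 12:00:00}
}
```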
plugins/inputs/snmp/CONFIG-EXAMPLES.md (new file, 65 lines)
@@ -0,0 +1,65 @@
Here are a few configuration examples for different use cases.

### Switch/router interface metrics

This setup will collect data on all interfaces from three different tables, `IF-MIB::ifTable`, `IF-MIB::ifXTable` and `EtherLike-MIB::dot3StatsTable`. It will also add the name from `IF-MIB::ifDescr` and use that as a tag. Depending on your needs and preferences you can easily use `IF-MIB::ifName` or `IF-MIB::ifAlias` instead or in addition. The values of these are typically:

    IF-MIB::ifName = Gi0/0/0
    IF-MIB::ifDescr = GigabitEthernet0/0/0
    IF-MIB::ifAlias = ### LAN ###

This configuration also collects the hostname from the device (`RFC1213-MIB::sysName.0`) and adds it as a tag. So each metric will have the configured host/IP as `agent_host`, the device's self-reported hostname as `hostname`, and the name of the host that collected these metrics as `host`.

Here is the configuration that you add to your `telegraf.conf`:

```
[[inputs.snmp]]
  agents = [ "host.example.com" ]
  version = 2
  community = "public"

  [[inputs.snmp.field]]
    name = "hostname"
    oid = "RFC1213-MIB::sysName.0"
    is_tag = true

  [[inputs.snmp.field]]
    name = "uptime"
    oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance"

  # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards.
  [[inputs.snmp.table]]
    name = "interface"
    inherit_tags = [ "hostname" ]
    oid = "IF-MIB::ifTable"

    # Interface tag - used to identify interface in metrics database
    [[inputs.snmp.table.field]]
      name = "ifDescr"
      oid = "IF-MIB::ifDescr"
      is_tag = true

  # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters
  [[inputs.snmp.table]]
    name = "interface"
    inherit_tags = [ "hostname" ]
    oid = "IF-MIB::ifXTable"

    # Interface tag - used to identify interface in metrics database
    [[inputs.snmp.table.field]]
      name = "ifDescr"
      oid = "IF-MIB::ifDescr"
      is_tag = true

  # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc)
  [[inputs.snmp.table]]
    name = "interface"
    inherit_tags = [ "hostname" ]
    oid = "EtherLike-MIB::dot3StatsTable"

    # Interface tag - used to identify interface in metrics database
    [[inputs.snmp.table.field]]
      name = "ifDescr"
      oid = "IF-MIB::ifDescr"
      is_tag = true
```
plugins/inputs/snmp/DEBUGGING.md (new file, 53 lines)
@@ -0,0 +1,53 @@
# Debugging & Testing SNMP Issues

### Install net-snmp on your system:

Mac:

```
brew install net-snmp
```

### Run an SNMP simulator docker image to get a full MIB on port 161:

```
docker run -d -p 161:161/udp xeemetric/snmp-simulator
```

### snmpget:

snmpget corresponds to the `inputs.snmp.field` configuration.

```bash
$ # get an snmp field with fully-qualified MIB name.
$ snmpget -v2c -c public localhost:161 system.sysUpTime.0
DISMAN-EVENT-MIB::sysUpTimeInstance = Timeticks: (1643) 0:00:16.43

$ # get an snmp field, outputting the numeric OID.
$ snmpget -On -v2c -c public localhost:161 system.sysUpTime.0
.1.3.6.1.2.1.1.3.0 = Timeticks: (1638) 0:00:16.38
```

### snmptranslate:

snmptranslate can be used to translate an OID to a MIB name:

```bash
$ snmptranslate .1.3.6.1.2.1.1.3.0
DISMAN-EVENT-MIB::sysUpTimeInstance
```

And to convert a partial MIB name to a fully qualified one:

```bash
$ snmptranslate -IR sysUpTime.0
DISMAN-EVENT-MIB::sysUpTimeInstance
```

And to convert a MIB name to an OID:

```bash
$ snmptranslate -On -IR system.sysUpTime.0
.1.3.6.1.2.1.1.3.0
```
@@ -4,6 +4,8 @@ The SNMP input plugin gathers metrics from SNMP agents.

## Configuration:

See additional SNMP plugin configuration examples [here](./CONFIG-EXAMPLES.md).

### Example:

SNMP data:
@@ -67,7 +69,7 @@ Resulting output:

#### Configuration via MIB:

This example uses the SNMP data above, but is configured via the MIB.
The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information.

Telegraf config:
@@ -95,70 +97,75 @@ Resulting output:

### Config parameters

* `agents`: Default: `[]`
List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`.

* `version`: Default: `2`
SNMP protocol version to use.

* `community`: Default: `"public"`
SNMP community to use.

* `max_repetitions`: Default: `50`
Maximum number of iterations for repeating variables.

* `sec_name`:
Security name for authenticated SNMPv3 requests.

* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""`
Authentication protocol for authenticated SNMPv3 requests.

* `auth_password`:
Authentication password for authenticated SNMPv3 requests.

* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"`
Security level used for SNMPv3 messages.

* `context_name`:
Context name used for SNMPv3 requests.

* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""`
Privacy protocol used for encrypted SNMPv3 messages.

* `priv_password`:
Privacy password used for encrypted SNMPv3 messages.

* `name`:
Output measurement name.

#### Field parameters:
* `oid`:
OID to get. May be a numeric or textual OID.

* `oid_index_suffix`:
The OID sub-identifier to strip off so that the index can be matched against other fields in the table.

* `name`:
Output field/tag name.
If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made.

* `is_tag`:
Output this field as a tag.

* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""`
Converts the value according to the given specification.

- `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal point left X places. For example, a value of `123` with `float(2)` will result in `1.23`.
- `float`: Converts the value into a float with no adjustment. Same as `float(0)`.
- `int`: Converts the value into an integer.
- `hwaddr`: Converts the value to a MAC address.
- `ipaddr`: Converts the value to an IP address.
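A tiny runnable sketch of the `float(X)` rule just described (illustrative; `floatX` is a hypothetical helper, not the plugin's converter, which appears later in this diff):

```go
package main

import (
	"fmt"
	"math"
)

// floatX divides v by the Xth power of 10, i.e. it moves the decimal
// point left by d places: floatX(123, 2) == 1.23.
func floatX(v int64, d int) float64 {
	return float64(v) / math.Pow10(d)
}

func main() {
	fmt.Println(floatX(123, 2)) // 1.23
}
```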
#### Table parameters:
* `oid`:
The table's OID. Automatically populates the table's fields using data from the MIB.

* `name`:
Output measurement name.
If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a textual OID will be made.

* `inherit_tags`:
Which tags to inherit from the top-level config and to use in the output of this table's measurement.

### MIB lookups
@@ -20,25 +20,29 @@ import (
const description = `Retrieves SNMP values from remote agents`
const sampleConfig = `
  agents = [ "127.0.0.1:161" ]
  ## Timeout for each SNMP query.
  timeout = "5s"
  ## Number of retries to attempt within timeout.
  retries = 3
  ## SNMP version, values can be 1, 2, or 3
  version = 2

  # SNMPv1 & SNMPv2 parameters
  ## SNMP community string.
  community = "public"

  # SNMPv2 & SNMPv3 parameters
  max_repetitions = 50
  ## The GETBULK max-repetitions parameter
  max_repetitions = 10

  # SNMPv3 parameters
  ## SNMPv3 auth parameters
  #sec_name = "myuser"
  #auth_protocol = "md5"        # Values: "MD5", "SHA", ""
  #auth_password = "password123"
  #sec_level = "authNoPriv"     # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
  #auth_protocol = "md5"        # Values: "MD5", "SHA", ""
  #auth_password = "pass"
  #sec_level = "authNoPriv"     # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
  #context_name = ""
  #priv_protocol = ""           # Values: "DES", "AES", ""
  #priv_password = ""

  # measurement name
  ## measurement name
  name = "system"
  [[inputs.snmp.field]]
    name = "hostname"
@@ -53,7 +57,7 @@ const sampleConfig = `
    oid = "HOST-RESOURCES-MIB::hrMemorySize"

  [[inputs.snmp.table]]
    # measurement name
    ## measurement name
    name = "remote_servers"
    inherit_tags = [ "hostname" ]
    [[inputs.snmp.table.field]]
@@ -68,7 +72,7 @@ const sampleConfig = `
      oid = ".1.0.0.0.1.2"

  [[inputs.snmp.table]]
    # auto populate table's fields using the MIB
    ## auto populate table's fields using the MIB
    oid = "HOST-RESOURCES-MIB::hrNetworkTable"
`
@@ -105,7 +109,7 @@ type Snmp struct {
	Community string

	// Parameters for Version 2 & 3
	MaxRepetitions uint
	MaxRepetitions uint8

	// Parameters for Version 3
	ContextName string
@@ -174,33 +178,48 @@ type Table struct {
	initialized bool
}

// init() populates Fields if a table OID is provided.
// init() builds & initializes the nested fields.
func (t *Table) init() error {
	if t.initialized {
		return nil
	}

	if err := t.initBuild(); err != nil {
		return err
	}

	// initialize all the nested fields
	for i := range t.Fields {
		if err := t.Fields[i].init(); err != nil {
			return err
		}
	}

	t.initialized = true
	return nil
}

// init() populates Fields if a table OID is provided.
func (t *Table) initBuild() error {
	if t.Oid == "" {
		t.initialized = true
		return nil
	}

	mibPrefix := ""
	if err := snmpTranslate(&mibPrefix, &t.Oid, &t.Name); err != nil {
		return err
	mibName, _, oidText, _, err := snmpTranslate(t.Oid)
	if err != nil {
		return Errorf(err, "translating %s", t.Oid)
	}
	if t.Name == "" {
		t.Name = oidText
	}
	mibPrefix := mibName + "::"
	oidFullName := mibPrefix + oidText

	// first attempt to get the table's tags
	tagOids := map[string]struct{}{}
	// We have to guess that the "entry" oid is `t.Oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info.
	if out, err := execCmd("snmptranslate", "-m", "all", "-Td", t.Oid+".1"); err == nil {
	if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil {
		lines := bytes.Split(out, []byte{'\n'})
		// get the MIB name if we didn't get it above
		if mibPrefix == "" {
			if i := bytes.Index(lines[0], []byte("::")); i != -1 {
				mibPrefix = string(lines[0][:i+2])
			}
		}

		for _, line := range lines {
			if !bytes.HasPrefix(line, []byte(" INDEX")) {
				continue
@@ -223,7 +242,7 @@ func (t *Table) init() error {
	}

	// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
	out, err := execCmd("snmptable", "-m", "all", "-Ch", "-Cl", "-c", "public", "127.0.0.1", t.Oid)
	out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
	if err != nil {
		return Errorf(err, "getting table columns for %s", t.Oid)
	}
@@ -240,14 +259,6 @@ func (t *Table) init() error {
		t.Fields = append(t.Fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag})
	}

	// initialize all the nested fields
	for i := range t.Fields {
		if err := t.Fields[i].init(); err != nil {
			return err
		}
	}

	t.initialized = true
	return nil
}
@@ -260,12 +271,16 @@ type Field struct {
	// off the OID prefix, and use the remainder as the index. For multiple fields
	// to show up in the same row, they must share the same index.
	Oid string
	// OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index.
	OidIndexSuffix string
	// IsTag controls whether this OID is output as a tag or a value.
	IsTag bool
	// Conversion controls any type conversion that is done on the value.
	// "float"/"float(0)" will convert the value into a float.
	// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit.
	// "int" will convert the value into an integer.
	// "hwaddr" will convert a 6-byte string to a MAC address.
	// "ipaddr" will convert the value to an IPv4 or IPv6 address.
	Conversion string

	initialized bool
@@ -277,8 +292,16 @@ func (f *Field) init() error {
		return nil
	}

	if err := snmpTranslate(nil, &f.Oid, &f.Name); err != nil {
		return err
	_, oidNum, oidText, conversion, err := snmpTranslate(f.Oid)
	if err != nil {
		return Errorf(err, "translating %s", f.Oid)
	}
	f.Oid = oidNum
	if f.Name == "" {
		f.Name = oidText
	}
	if f.Conversion == "" {
		f.Conversion = conversion
	}

	//TODO use textual convention conversion from the MIB
@@ -330,8 +353,8 @@ func Errorf(err error, msg string, format ...interface{}) error {
func init() {
	inputs.Add("snmp", func() telegraf.Input {
		return &Snmp{
			Retries:        5,
			MaxRepetitions: 50,
			Retries:        3,
			MaxRepetitions: 10,
			Timeout:        internal.Duration{Duration: 5 * time.Second},
			Version:        2,
			Community:      "public",
@@ -446,16 +469,38 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
			// index, and being added on the same row.
			if pkt, err := gs.Get([]string{oid}); err != nil {
				return nil, Errorf(err, "performing get")
			} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject {
			} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
				ent := pkt.Variables[0]
				ifv[ent.Name[len(oid):]] = fieldConvert(f.Conversion, ent.Value)
				fv, err := fieldConvert(f.Conversion, ent.Value)
				if err != nil {
					return nil, Errorf(err, "converting %q", ent.Value)
				}
				if fvs, ok := fv.(string); !ok || fvs != "" {
					ifv[""] = fv
				}
			}
		} else {
			err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error {
				if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." {
					return NestedError{} // break the walk
				}
				ifv[ent.Name[len(oid):]] = fieldConvert(f.Conversion, ent.Value)

				idx := ent.Name[len(oid):]
				if f.OidIndexSuffix != "" {
					if !strings.HasSuffix(idx, f.OidIndexSuffix) {
						// this entry doesn't match our OidIndexSuffix. skip it
						return nil
					}
					idx = idx[:len(idx)-len(f.OidIndexSuffix)]
				}

				fv, err := fieldConvert(f.Conversion, ent.Value)
				if err != nil {
					return Errorf(err, "converting %q", ent.Value)
				}
				if fvs, ok := fv.(string); !ok || fvs != "" {
					ifv[idx] = fv
				}
				return nil
			})
			if err != nil {
@@ -610,7 +655,7 @@ func (s *Snmp) getConnection(agent string) (snmpConnection, error) {
		}
	}

	gs.MaxRepetitions = int(s.MaxRepetitions)
	gs.MaxRepetitions = s.MaxRepetitions

	if s.Version == 3 {
		gs.ContextName = s.ContextName
@@ -677,14 +722,15 @@ func (s *Snmp) getConnection(agent string) (snmpConnection, error) {
// "float"/"float(0)" will convert the value into a float.
// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit.
// "int" will convert the value into an integer.
// "hwaddr" will convert the value into a MAC address.
// "ipaddr" will convert the value into an IP address.
// "" will convert a byte slice into a string.
// Any other conv will return the input value unchanged.
func fieldConvert(conv string, v interface{}) interface{} {
func fieldConvert(conv string, v interface{}) (interface{}, error) {
	if conv == "" {
		if bs, ok := v.([]byte); ok {
			return string(bs)
			return string(bs), nil
		}
		return v
		return v, nil
	}

	var d int
@@ -721,7 +767,9 @@ func fieldConvert(conv string, v interface{}) interface{} {
		vf, _ := strconv.ParseFloat(vt, 64)
		v = vf / math.Pow10(d)
	}
	return v, nil
	}

	if conv == "int" {
		switch vt := v.(type) {
		case float32:
@@ -753,39 +801,112 @@ func fieldConvert(conv string, v interface{}) interface{} {
		case string:
			v, _ = strconv.Atoi(vt)
		}
		return v, nil
	}

	return v
	if conv == "hwaddr" {
		switch vt := v.(type) {
		case string:
			v = net.HardwareAddr(vt).String()
		case []byte:
			v = net.HardwareAddr(vt).String()
		default:
			return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v)
		}
		return v, nil
	}

	if conv == "ipaddr" {
		var ipbs []byte

		switch vt := v.(type) {
		case string:
			ipbs = []byte(vt)
		case []byte:
			ipbs = vt
		default:
			return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", v)
		}

		switch len(ipbs) {
		case 4, 16:
			v = net.IP(ipbs).String()
		default:
			return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs))
		}

		return v, nil
	}

	return nil, fmt.Errorf("invalid conversion type '%s'", conv)
}

// snmpTranslate resolves the given OID.
// The contents of the oid parameter will be replaced with the numeric oid value.
// If name is empty, the textual OID value is stored in it. If the textual OID cannot be translated, the numeric OID is stored instead.
// If mibPrefix is non-nil, the MIB in which the OID was found is stored, with a suffix of "::".
func snmpTranslate(mibPrefix *string, oid *string, name *string) error {
	if strings.ContainsAny(*oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
		out, err := execCmd("snmptranslate", "-m", "all", "-On", *oid)
		if err != nil {
			return Errorf(err, "translating %s", *oid)
		}
		*oid = string(bytes.TrimSuffix(out, []byte{'\n'}))
func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
	var out []byte
	if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
		out, err = execCmd("snmptranslate", "-Td", "-Ob", oid)
	} else {
		out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid)
	}
	if err != nil {
		return "", "", "", "", err
	}

	if *name == "" {
		out, err := execCmd("snmptranslate", "-m", "all", *oid)
	bb := bytes.NewBuffer(out)

	oidText, err = bb.ReadString('\n')
	if err != nil {
		return "", "", "", "", Errorf(err, "getting OID text")
	}
	oidText = oidText[:len(oidText)-1]

	i := strings.Index(oidText, "::")
	if i == -1 {
		// was not found in MIB.
		if bytes.Index(bb.Bytes(), []byte(" [TRUNCATED]")) >= 0 {
			return "", oid, oid, "", nil
		}
		// not truncated, but not fully found. We still need to parse out numeric OID, so keep going
		oidText = oid
	} else {
		mibName = oidText[:i]
		oidText = oidText[i+2:]
	}

	if i := bytes.Index(bb.Bytes(), []byte(" -- TEXTUAL CONVENTION ")); i != -1 {
		bb.Next(i + len(" -- TEXTUAL CONVENTION "))
		tc, err := bb.ReadString('\n')
		if err != nil {
			//TODO debug message
			*name = *oid
			return "", "", "", "", Errorf(err, "getting textual convention")
		}
		tc = tc[:len(tc)-1]
		switch tc {
		case "MacAddress", "PhysAddress":
			conversion = "hwaddr"
		case "InetAddressIPv4", "InetAddressIPv6", "InetAddress":
			conversion = "ipaddr"
		}
	}

	i = bytes.Index(bb.Bytes(), []byte("::= { "))
	bb.Next(i + len("::= { "))
	objs, err := bb.ReadString('}')
	if err != nil {
		return "", "", "", "", Errorf(err, "getting numeric oid")
	}
	objs = objs[:len(objs)-1]
	for _, obj := range strings.Split(objs, " ") {
		if len(obj) == 0 {
			continue
		}
		if i := strings.Index(obj, "("); i != -1 {
			obj = obj[i+1:]
			oidNum += "." + obj[:strings.Index(obj, ")")]
		} else {
			if i := bytes.Index(out, []byte("::")); i != -1 {
				if mibPrefix != nil {
					*mibPrefix = string(out[:i+2])
				}
				out = out[i+2:]
			}
			*name = string(bytes.TrimSuffix(out, []byte{'\n'}))
			oidNum += "." + obj
		}
	}

	return nil
	return mibName, oidNum, oidText, conversion, nil
}
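To make the parsing above concrete, here is a hedged, abridged example of the `snmptranslate -Td` output it consumes (representative of net-snmp output; the exact fields, and the DESCRIPTION elided here, vary by MIB). The first line supplies `mibName` and `oidText`, the `-- TEXTUAL CONVENTION` line selects the implicit `hwaddr`/`ipaddr` conversion, and the trailing `::= { ... }` line is reassembled into the numeric OID:

```bash
$ snmptranslate -Td -Ob IF-MIB::ifPhysAddress.1
IF-MIB::ifPhysAddress.1
ifPhysAddress OBJECT-TYPE
  -- FROM	IF-MIB
  -- TEXTUAL CONVENTION PhysAddress
  SYNTAX	OCTET STRING
  MAX-ACCESS	read-only
  STATUS	current
::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }
```

Under these assumptions, `snmpTranslate` would return `mibName = "IF-MIB"`, `oidText = "ifPhysAddress.1"`, `conversion = "hwaddr"`, and `oidNum = ".1.3.6.1.2.1.2.2.1.6.1"`.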
plugins/inputs/snmp/snmp_mocks_generate.go (new file, 100 lines)
@@ -0,0 +1,100 @@
// +build generate

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// This file is a generator used to generate the mocks for the commands used by the tests.

// These are the commands to be mocked.
var mockedCommands = [][]string{
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.4"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"},
	{"snmptranslate", "-Td", "-Ob", ".iso.2.3"},
	{"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"},
	{"snmptranslate", "-Td", "-Ob", "TEST::server"},
	{"snmptranslate", "-Td", "-Ob", "TEST::server.0"},
	{"snmptranslate", "-Td", "-Ob", "TEST::testTable"},
	{"snmptranslate", "-Td", "-Ob", "TEST::connections"},
	{"snmptranslate", "-Td", "-Ob", "TEST::latency"},
	{"snmptranslate", "-Td", "-Ob", "TEST::hostname"},
	{"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"},
	{"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"},
	{"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"},
	{"snmptranslate", "-Td", "TEST::testTable.1"},
	{"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"},
}

type mockedCommandResult struct {
	stdout    string
	stderr    string
	exitError bool
}

func main() {
	if err := generate(); err != nil {
		fmt.Fprintf(os.Stderr, "error: %s\n", err)
		os.Exit(1)
	}
}

func generate() error {
	f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	br := bufio.NewReader(f)
	// skip forward to the generated-content marker, tracking the offset
	var i int64
	for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') {
		i += int64(len(l))
		if l == "// BEGIN GO GENERATE CONTENT\n" {
			break
		}
	}
	f.Truncate(i)
	f.Seek(i, 0)

	fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n")

	for _, cmd := range mockedCommands {
		ec := exec.Command(cmd[0], cmd[1:]...)
		out := bytes.NewBuffer(nil)
		stderr := bytes.NewBuffer(nil)
		ec.Stdout = out
		ec.Stderr = stderr
		ec.Env = []string{
			"MIBDIRS=+./testdata",
		}

		var mcr mockedCommandResult
		if err := ec.Run(); err != nil {
			if _, ok := err.(*exec.ExitError); ok {
				// a non-zero exit is a legitimate, mockable result
				mcr.exitError = true
			} else {
				return fmt.Errorf("executing %v: %s", cmd, err)
			}
		}
		mcr.stdout = string(out.Bytes())
		mcr.stderr = string(stderr.Bytes())
		cmd0 := strings.Join(cmd, "\000")
		mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix
		fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv)
	}
	f.Write([]byte("}\n"))
	f.Close()

	return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run()
}
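A hedged usage note: with the `generate` build tag guarding the file, regenerating the mocks presumably looks something like the following, run from the plugin directory so that `./testdata` and `snmp_mocks_test.go` resolve (the exact invocation used in the repository may differ):

```bash
cd plugins/inputs/snmp
go run -tags generate snmp_mocks_generate.go
```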
Some files were not shown because too many files have changed in this diff.