Compare commits
27 Commits
kb-procsta
...
v0.3.0-bet
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8b54c73ae4 | ||
|
|
c9ef073fba | ||
|
|
15f66d7d1b | ||
|
|
b0f79f43ec | ||
|
|
c584129758 | ||
|
|
d1930c90b5 | ||
|
|
1e76e36df2 | ||
|
|
a73b5257dc | ||
|
|
c16be04ca7 | ||
|
|
5513275f2c | ||
|
|
3a7b1688a3 | ||
|
|
35d5c7bae3 | ||
|
|
60b6693ae3 | ||
|
|
c1e1f2ace4 | ||
|
|
6698d195d8 | ||
|
|
23b21ca86a | ||
|
|
56e14e4731 | ||
|
|
7deb339b76 | ||
|
|
0e55c371b7 | ||
|
|
f284c8c154 | ||
|
|
e3b314cacb | ||
|
|
9fce094b36 | ||
|
|
319c363c8e | ||
|
|
40d84accee | ||
|
|
3fc43df84e | ||
|
|
59f804d77a | ||
|
|
96d5f0d0de |
4
.gitattributes
vendored
4
.gitattributes
vendored
@@ -1,4 +0,0 @@
|
|||||||
CHANGELOG.md merge=union
|
|
||||||
README.md merge=union
|
|
||||||
plugins/inputs/all/all.go merge=union
|
|
||||||
plugins/outputs/all/all.go merge=union
|
|
||||||
44
.github/ISSUE_TEMPLATE.md
vendored
44
.github/ISSUE_TEMPLATE.md
vendored
@@ -1,44 +0,0 @@
|
|||||||
## Directions
|
|
||||||
|
|
||||||
GitHub Issues are reserved for actionable bug reports and feature requests.
|
|
||||||
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
|
|
||||||
|
|
||||||
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
|
|
||||||
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
|
|
||||||
Erase the other section and everything on and above this line.
|
|
||||||
|
|
||||||
*Please note, the quickest way to fix a bug is to open a Pull Request.*
|
|
||||||
|
|
||||||
## Bug report
|
|
||||||
|
|
||||||
### Relevant telegraf.conf:
|
|
||||||
|
|
||||||
### System info:
|
|
||||||
|
|
||||||
[Include Telegraf version, operating system name, and other relevant details]
|
|
||||||
|
|
||||||
### Steps to reproduce:
|
|
||||||
|
|
||||||
1. ...
|
|
||||||
2. ...
|
|
||||||
|
|
||||||
### Expected behavior:
|
|
||||||
|
|
||||||
### Actual behavior:
|
|
||||||
|
|
||||||
### Additional info:
|
|
||||||
|
|
||||||
[Include gist of relevant config, logs, etc.]
|
|
||||||
|
|
||||||
|
|
||||||
## Feature Request
|
|
||||||
|
|
||||||
Opening a feature request kicks off a discussion.
|
|
||||||
|
|
||||||
### Proposal:
|
|
||||||
|
|
||||||
### Current behavior:
|
|
||||||
|
|
||||||
### Desired behavior:
|
|
||||||
|
|
||||||
### Use case: [Why is this important (helps with prioritizing requests)]
|
|
||||||
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,5 +0,0 @@
|
|||||||
### Required for all PRs:
|
|
||||||
|
|
||||||
- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
|
|
||||||
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
|
|
||||||
- [ ] README.md updated (if adding a new plugin)
|
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,7 +1,4 @@
|
|||||||
build
|
|
||||||
tivan
|
tivan
|
||||||
.vagrant
|
.vagrant
|
||||||
/telegraf
|
telegraf
|
||||||
.idea
|
.idea
|
||||||
*~
|
|
||||||
*#
|
|
||||||
|
|||||||
912
CHANGELOG.md
912
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
177
CONFIGURATION.md
Normal file
177
CONFIGURATION.md
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
# Telegraf Configuration
|
||||||
|
|
||||||
|
## Plugin Configuration
|
||||||
|
|
||||||
|
There are some configuration options that are configurable per plugin:
|
||||||
|
|
||||||
|
* **name_override**: Override the base name of the measurement.
|
||||||
|
(Default is the name of the plugin).
|
||||||
|
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
||||||
|
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
||||||
|
* **tags**: A map of tags to apply to a specific plugin's measurements.
|
||||||
|
|
||||||
|
### Plugin Filters
|
||||||
|
|
||||||
|
There are also filters that can be configured per plugin:
|
||||||
|
|
||||||
|
* **pass**: An array of strings that is used to filter metrics generated by the
|
||||||
|
current plugin. Each string in the array is tested as a glob match against field names
|
||||||
|
and if it matches, the field is emitted.
|
||||||
|
* **drop**: The inverse of pass, if a field name matches, it is not emitted.
|
||||||
|
* **tagpass**: tag names and arrays of strings that are used to filter
|
||||||
|
measurements by the current plugin. Each string in the array is tested as a glob
|
||||||
|
match against the tag name, and if it matches the measurement is emitted.
|
||||||
|
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted.
|
||||||
|
This is tested on measurements that have passed the tagpass test.
|
||||||
|
* **interval**: How often to gather this metric. Normal plugins use a single
|
||||||
|
global interval, but if one particular plugin should be run less or more often,
|
||||||
|
you can configure that here.
|
||||||
|
|
||||||
|
### Plugin Configuration Examples
|
||||||
|
|
||||||
|
This is a full working config that will output CPU data to an InfluxDB instance
|
||||||
|
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
|
||||||
|
measurements at a 10s interval and will collect per-cpu data, dropping any
|
||||||
|
fields which begin with `time_`.
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[tags]
|
||||||
|
dc = "denver-1"
|
||||||
|
|
||||||
|
[agent]
|
||||||
|
interval = "10s"
|
||||||
|
|
||||||
|
# OUTPUTS
|
||||||
|
[outputs]
|
||||||
|
[[outputs.influxdb]]
|
||||||
|
url = "http://192.168.59.103:8086" # required.
|
||||||
|
database = "telegraf" # required.
|
||||||
|
precision = "s"
|
||||||
|
|
||||||
|
# PLUGINS
|
||||||
|
[plugins]
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = true
|
||||||
|
totalcpu = false
|
||||||
|
# filter all fields beginning with 'time_'
|
||||||
|
drop = ["time_*"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin Config: tagpass and tagdrop
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[plugins]
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = true
|
||||||
|
totalcpu = false
|
||||||
|
drop = ["cpu_time"]
|
||||||
|
# Don't collect CPU data for cpu6 & cpu7
|
||||||
|
[plugins.cpu.tagdrop]
|
||||||
|
cpu = [ "cpu6", "cpu7" ]
|
||||||
|
|
||||||
|
[[plugins.disk]]
|
||||||
|
[plugins.disk.tagpass]
|
||||||
|
# tagpass conditions are OR, not AND.
|
||||||
|
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
|
||||||
|
# then the metric passes
|
||||||
|
fstype = [ "ext4", "xfs" ]
|
||||||
|
# Globs can also be used on the tag values
|
||||||
|
path = [ "/opt", "/home*" ]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin Config: pass and drop
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# Drop all metrics for guest & steal CPU usage
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
drop = ["usage_guest", "usage_steal"]
|
||||||
|
|
||||||
|
# Only store inode related metrics for disks
|
||||||
|
[[plugins.disk]]
|
||||||
|
pass = ["inodes*"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin config: prefix, suffix, and override
|
||||||
|
|
||||||
|
This plugin will emit measurements with the name `cpu_total`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[plugins.cpu]]
|
||||||
|
name_suffix = "_total"
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
```
|
||||||
|
|
||||||
|
This will emit measurements with the name `foobar`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[plugins.cpu]]
|
||||||
|
name_override = "foobar"
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin config: tags
|
||||||
|
|
||||||
|
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
||||||
|
`tag2=bar`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
[plugins.cpu.tags]
|
||||||
|
tag1 = "foo"
|
||||||
|
tag2 = "bar"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multiple plugins of the same type
|
||||||
|
|
||||||
|
Additional plugins (or outputs) of the same type can be specified,
|
||||||
|
just define more instances in the config file:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
|
||||||
|
[[plugins.cpu]]
|
||||||
|
percpu = true
|
||||||
|
totalcpu = false
|
||||||
|
drop = ["cpu_time*"]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Configuration
|
||||||
|
|
||||||
|
Telegraf also supports specifying multiple output sinks to send data to,
|
||||||
|
configuring each output sink is different, but examples can be
|
||||||
|
found by running `telegraf -sample-config`.
|
||||||
|
|
||||||
|
Outputs also support the same configurable options as plugins
|
||||||
|
(pass, drop, tagpass, tagdrop), added in 0.2.4
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[outputs.influxdb]]
|
||||||
|
urls = [ "http://localhost:8086" ]
|
||||||
|
database = "telegraf"
|
||||||
|
precision = "s"
|
||||||
|
# Drop all measurements that start with "aerospike"
|
||||||
|
drop = ["aerospike*"]
|
||||||
|
|
||||||
|
[[outputs.influxdb]]
|
||||||
|
urls = [ "http://localhost:8086" ]
|
||||||
|
database = "telegraf-aerospike-data"
|
||||||
|
precision = "s"
|
||||||
|
# Only accept aerospike data:
|
||||||
|
pass = ["aerospike*"]
|
||||||
|
|
||||||
|
[[outputs.influxdb]]
|
||||||
|
urls = [ "http://localhost:8086" ]
|
||||||
|
database = "telegraf-cpu0-data"
|
||||||
|
precision = "s"
|
||||||
|
# Only store measurements where the tag "cpu" matches the value "cpu0"
|
||||||
|
[outputs.influxdb.tagpass]
|
||||||
|
cpu = ["cpu0"]
|
||||||
|
```
|
||||||
288
CONTRIBUTING.md
288
CONTRIBUTING.md
@@ -1,74 +1,103 @@
|
|||||||
## Steps for Contributing:
|
|
||||||
|
|
||||||
1. [Sign the CLA](http://influxdb.com/community/cla.html)
|
|
||||||
1. Make changes or write plugin (see below for details)
|
|
||||||
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
|
|
||||||
1. If your plugin requires a new Go package,
|
|
||||||
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
|
|
||||||
1. Write a README for your plugin, if it's an input plugin, it should be structured
|
|
||||||
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
|
|
||||||
Output plugins READMEs are less structured,
|
|
||||||
but any information you can provide on how the data will look is appreciated.
|
|
||||||
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
|
|
||||||
for a good example.
|
|
||||||
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
|
|
||||||
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
|
|
||||||
|
|
||||||
## GoDoc
|
|
||||||
|
|
||||||
Public interfaces for inputs, outputs, metrics, and the accumulator can be found
|
|
||||||
on the GoDoc
|
|
||||||
|
|
||||||
[](https://godoc.org/github.com/influxdata/telegraf)
|
|
||||||
|
|
||||||
## Sign the CLA
|
## Sign the CLA
|
||||||
|
|
||||||
Before we can merge a pull request, you will need to sign the CLA,
|
Before we can merge a pull request, you will need to sign the CLA,
|
||||||
which can be found [on our website](http://influxdb.com/community/cla.html)
|
which can be found [on our website](http://influxdb.com/community/cla.html)
|
||||||
|
|
||||||
## Adding a dependency
|
## Plugins
|
||||||
|
|
||||||
Assuming you can already build the project, run these in the telegraf directory:
|
This section is for developers who want to create new collection plugins.
|
||||||
|
|
||||||
1. `go get github.com/sparrc/gdm`
|
|
||||||
1. `gdm restore`
|
|
||||||
1. `GOOS=linux gdm save`
|
|
||||||
|
|
||||||
## Input Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create new collection inputs.
|
|
||||||
Telegraf is entirely plugin driven. This interface allows for operators to
|
Telegraf is entirely plugin driven. This interface allows for operators to
|
||||||
pick and chose what is gathered and makes it easy for developers
|
pick and chose what is gathered as well as makes it easy for developers
|
||||||
to create new ways of generating metrics.
|
to create new ways of generating metrics.
|
||||||
|
|
||||||
Plugin authorship is kept as simple as possible to promote people to develop
|
Plugin authorship is kept as simple as possible to promote people to develop
|
||||||
and submit new inputs.
|
and submit new plugins.
|
||||||
|
|
||||||
### Input Plugin Guidelines
|
### Plugin Guidelines
|
||||||
|
|
||||||
* A plugin must conform to the `telegraf.Input` interface.
|
* A plugin must conform to the `plugins.Plugin` interface.
|
||||||
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
|
* Each generated metric automatically has the name of the plugin that generated
|
||||||
|
it prepended. This is to keep plugins honest.
|
||||||
|
* Plugins should call `plugins.Add` in their `init` function to register themselves.
|
||||||
See below for a quick example.
|
See below for a quick example.
|
||||||
* Input Plugins must be added to the
|
* To be available within Telegraf itself, plugins must add themselves to the
|
||||||
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
|
`github.com/influxdb/telegraf/plugins/all/all.go` file.
|
||||||
* The `SampleConfig` function should return valid toml that describes how the
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
plugin can be configured. This is include in `telegraf -sample-config`.
|
plugin can be configured. This is include in `telegraf -sample-config`.
|
||||||
* The `Description` function should say in one line what this plugin does.
|
* The `Description` function should say in one line what this plugin does.
|
||||||
|
|
||||||
Let's say you've written a plugin that emits metrics about processes on the
|
### Plugin interface
|
||||||
current host.
|
|
||||||
|
|
||||||
### Input Plugin Example
|
```go
|
||||||
|
type Plugin interface {
|
||||||
|
SampleConfig() string
|
||||||
|
Description() string
|
||||||
|
Gather(Accumulator) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type Accumulator interface {
|
||||||
|
Add(measurement string,
|
||||||
|
value interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
timestamp ...time.Time)
|
||||||
|
AddFields(measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
timestamp ...time.Time)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Accumulator
|
||||||
|
|
||||||
|
The way that a plugin emits metrics is by interacting with the Accumulator.
|
||||||
|
|
||||||
|
The `Add` function takes 3 arguments:
|
||||||
|
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
|
||||||
|
* **value**: A value for the metric. This accepts 5 different types of value:
|
||||||
|
* **int**: The most common type. All int types are accepted but favor using `int64`
|
||||||
|
Useful for counters, etc.
|
||||||
|
* **float**: Favor `float64`, useful for gauges, percentages, etc.
|
||||||
|
* **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
|
||||||
|
* **string**: Typically used to indicate a message, or some kind of freeform information.
|
||||||
|
* **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
|
||||||
|
* **tags**: This is a map of strings to strings to describe the where or who
|
||||||
|
about the metric. For instance, the `net` plugin adds a tag named `"interface"`
|
||||||
|
set to the name of the network interface, like `"eth0"`.
|
||||||
|
|
||||||
|
The `AddFieldsWithTime` allows multiple values for a point to be passed. The values
|
||||||
|
used are the same type profile as **value** above. The **timestamp** argument
|
||||||
|
allows a point to be registered as having occurred at an arbitrary time.
|
||||||
|
|
||||||
|
Let's say you've written a plugin that emits metrics about processes on the current host.
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
type Process struct {
|
||||||
|
CPUTime float64
|
||||||
|
MemoryBytes int64
|
||||||
|
PID int
|
||||||
|
}
|
||||||
|
|
||||||
|
func Gather(acc plugins.Accumulator) error {
|
||||||
|
for _, process := range system.Processes() {
|
||||||
|
tags := map[string]string {
|
||||||
|
"pid": fmt.Sprintf("%d", process.Pid),
|
||||||
|
}
|
||||||
|
|
||||||
|
acc.Add("cpu", process.CPUTime, tags, time.Now())
|
||||||
|
acc.Add("memory", process.MemoryBytes, tags, time.Now())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Plugin Example
|
||||||
|
|
||||||
```go
|
```go
|
||||||
package simple
|
package simple
|
||||||
|
|
||||||
// simple.go
|
// simple.go
|
||||||
|
|
||||||
import (
|
import "github.com/influxdb/telegraf/plugins"
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Simple struct {
|
type Simple struct {
|
||||||
Ok bool
|
Ok bool
|
||||||
@@ -82,83 +111,30 @@ func (s *Simple) SampleConfig() string {
|
|||||||
return "ok = true # indicate if everything is fine"
|
return "ok = true # indicate if everything is fine"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Simple) Gather(acc telegraf.Accumulator) error {
|
func (s *Simple) Gather(acc plugins.Accumulator) error {
|
||||||
if s.Ok {
|
if s.Ok {
|
||||||
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
|
acc.Add("state", "pretty good", nil)
|
||||||
} else {
|
} else {
|
||||||
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
|
acc.Add("state", "not great", nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
|
plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Adding Typed Metrics
|
## Service Plugins
|
||||||
|
|
||||||
In addition the the `AddFields` function, the accumulator also supports an
|
|
||||||
`AddGauge` and `AddCounter` function. These functions are for adding _typed_
|
|
||||||
metrics. Metric types are ignored for the InfluxDB output, but can be used
|
|
||||||
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
|
|
||||||
|
|
||||||
## Input Plugins Accepting Arbitrary Data Formats
|
|
||||||
|
|
||||||
Some input plugins (such as
|
|
||||||
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
|
|
||||||
accept arbitrary input data formats. An overview of these data formats can
|
|
||||||
be found
|
|
||||||
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
|
|
||||||
|
|
||||||
In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
|
|
||||||
function on the plugin object (see the exec plugin for an example), as well as
|
|
||||||
defining `parser` as a field of the object.
|
|
||||||
|
|
||||||
You can then utilize the parser internally in your plugin, parsing data as you
|
|
||||||
see fit. Telegraf's configuration layer will take care of instantiating and
|
|
||||||
creating the `Parser` object.
|
|
||||||
|
|
||||||
You should also add the following to your SampleConfig() return:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
Below is the `Parser` interface.
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Parser is an interface defining functions that a parser plugin must satisfy.
|
|
||||||
type Parser interface {
|
|
||||||
// Parse takes a byte buffer separated by newlines
|
|
||||||
// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
|
|
||||||
// and parses it into telegraf metrics
|
|
||||||
Parse(buf []byte) ([]telegraf.Metric, error)
|
|
||||||
|
|
||||||
// ParseLine takes a single string metric
|
|
||||||
// ie, "cpu.usage.idle 90"
|
|
||||||
// and parses it into a telegraf metric.
|
|
||||||
ParseLine(line string) (telegraf.Metric, error)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And you can view the code
|
|
||||||
[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
|
|
||||||
|
|
||||||
## Service Input Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create new "service" collection
|
This section is for developers who want to create new "service" collection
|
||||||
inputs. A service plugin differs from a regular plugin in that it operates
|
plugins. A service plugin differs from a regular plugin in that it operates
|
||||||
a background service while Telegraf is running. One example would be the `statsd`
|
a background service while Telegraf is running. One example would be the `statsd`
|
||||||
plugin, which operates a statsd server.
|
plugin, which operates a statsd server.
|
||||||
|
|
||||||
Service Input Plugins are substantially more complicated than a regular plugin, as they
|
Service Plugins are substantially more complicated than a regular plugin, as they
|
||||||
will require threads and locks to verify data integrity. Service Input Plugins should
|
will require threads and locks to verify data integrity. Service Plugins should
|
||||||
be avoided unless there is no way to create their behavior with a regular plugin.
|
be avoided unless there is no way to create their behavior with a regular plugin.
|
||||||
|
|
||||||
Their interface is quite similar to a regular plugin, with the addition of `Start()`
|
Their interface is quite similar to a regular plugin, with the addition of `Start()`
|
||||||
@@ -167,25 +143,49 @@ and `Stop()` methods.
|
|||||||
### Service Plugin Guidelines
|
### Service Plugin Guidelines
|
||||||
|
|
||||||
* Same as the `Plugin` guidelines, except that they must conform to the
|
* Same as the `Plugin` guidelines, except that they must conform to the
|
||||||
`inputs.ServiceInput` interface.
|
`plugins.ServicePlugin` interface.
|
||||||
|
|
||||||
## Output Plugins
|
### Service Plugin interface
|
||||||
|
|
||||||
|
```go
|
||||||
|
type ServicePlugin interface {
|
||||||
|
SampleConfig() string
|
||||||
|
Description() string
|
||||||
|
Gather(Accumulator) error
|
||||||
|
Start() error
|
||||||
|
Stop()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Outputs
|
||||||
|
|
||||||
This section is for developers who want to create a new output sink. Outputs
|
This section is for developers who want to create a new output sink. Outputs
|
||||||
are created in a similar manner as collection plugins, and their interface has
|
are created in a similar manner as collection plugins, and their interface has
|
||||||
similar constructs.
|
similar constructs.
|
||||||
|
|
||||||
### Output Plugin Guidelines
|
### Output Guidelines
|
||||||
|
|
||||||
* An output must conform to the `outputs.Output` interface.
|
* An output must conform to the `outputs.Output` interface.
|
||||||
* Outputs should call `outputs.Add` in their `init` function to register themselves.
|
* Outputs should call `outputs.Add` in their `init` function to register themselves.
|
||||||
See below for a quick example.
|
See below for a quick example.
|
||||||
* To be available within Telegraf itself, plugins must add themselves to the
|
* To be available within Telegraf itself, plugins must add themselves to the
|
||||||
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
|
`github.com/influxdb/telegraf/outputs/all/all.go` file.
|
||||||
* The `SampleConfig` function should return valid toml that describes how the
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
output can be configured. This is include in `telegraf -sample-config`.
|
output can be configured. This is include in `telegraf -sample-config`.
|
||||||
* The `Description` function should say in one line what this output does.
|
* The `Description` function should say in one line what this output does.
|
||||||
|
|
||||||
|
### Output interface
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Output interface {
|
||||||
|
Connect() error
|
||||||
|
Close() error
|
||||||
|
Description() string
|
||||||
|
SampleConfig() string
|
||||||
|
Write(points []*client.Point) error
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### Output Example
|
### Output Example
|
||||||
|
|
||||||
```go
|
```go
|
||||||
@@ -193,10 +193,7 @@ package simpleoutput
|
|||||||
|
|
||||||
// simpleoutput.go
|
// simpleoutput.go
|
||||||
|
|
||||||
import (
|
import "github.com/influxdb/telegraf/outputs"
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Simple struct {
|
type Simple struct {
|
||||||
Ok bool
|
Ok bool
|
||||||
@@ -220,47 +217,20 @@ func (s *Simple) Close() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Simple) Write(metrics []telegraf.Metric) error {
|
func (s *Simple) Write(points []*client.Point) error {
|
||||||
for _, metric := range metrics {
|
for _, pt := range points {
|
||||||
// write `metric` to the output sink here
|
// write `pt` to the output sink here
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
|
outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
|
||||||
}
|
}
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Output Plugins Writing Arbitrary Data Formats
|
## Service Outputs
|
||||||
|
|
||||||
Some output plugins (such as
|
|
||||||
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
|
|
||||||
can write arbitrary output data formats. An overview of these data formats can
|
|
||||||
be found
|
|
||||||
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
|
|
||||||
|
|
||||||
In order to enable this, you must specify a
|
|
||||||
`SetSerializer(serializer serializers.Serializer)`
|
|
||||||
function on the plugin object (see the file plugin for an example), as well as
|
|
||||||
defining `serializer` as a field of the object.
|
|
||||||
|
|
||||||
You can then utilize the serializer internally in your plugin, serializing data
|
|
||||||
before it's written. Telegraf's configuration layer will take care of
|
|
||||||
instantiating and creating the `Serializer` object.
|
|
||||||
|
|
||||||
You should also add the following to your SampleConfig() return:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Data format to output.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Service Output Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create new "service" output. A
|
This section is for developers who want to create new "service" output. A
|
||||||
service output differs from a regular output in that it operates a background service
|
service output differs from a regular output in that it operates a background service
|
||||||
@@ -273,7 +243,21 @@ and `Stop()` methods.
|
|||||||
### Service Output Guidelines
|
### Service Output Guidelines
|
||||||
|
|
||||||
* Same as the `Output` guidelines, except that they must conform to the
|
* Same as the `Output` guidelines, except that they must conform to the
|
||||||
`output.ServiceOutput` interface.
|
`plugins.ServiceOutput` interface.
|
||||||
|
|
||||||
|
### Service Output interface
|
||||||
|
|
||||||
|
```go
|
||||||
|
type ServiceOutput interface {
|
||||||
|
Connect() error
|
||||||
|
Close() error
|
||||||
|
Description() string
|
||||||
|
SampleConfig() string
|
||||||
|
Write(points []*client.Point) error
|
||||||
|
Start() error
|
||||||
|
Stop()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Unit Tests
|
## Unit Tests
|
||||||
|
|
||||||
@@ -290,7 +274,7 @@ which would take some time to replicate.
|
|||||||
To overcome this situation we've decided to use docker containers to provide a
|
To overcome this situation we've decided to use docker containers to provide a
|
||||||
fast and reproducible environment to test those services which require it.
|
fast and reproducible environment to test those services which require it.
|
||||||
For other situations
|
For other situations
|
||||||
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
|
(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go )
|
||||||
a simple mock will suffice.
|
a simple mock will suffice.
|
||||||
|
|
||||||
To execute Telegraf tests follow these simple steps:
|
To execute Telegraf tests follow these simple steps:
|
||||||
@@ -299,6 +283,10 @@ To execute Telegraf tests follow these simple steps:
|
|||||||
instructions
|
instructions
|
||||||
- execute `make test`
|
- execute `make test`
|
||||||
|
|
||||||
|
**OSX users**: you will need to install `boot2docker` or `docker-machine`.
|
||||||
|
The Makefile will assume that you have a `docker-machine` box called `default` to
|
||||||
|
get the IP address.
|
||||||
|
|
||||||
### Unit test troubleshooting
|
### Unit test troubleshooting
|
||||||
|
|
||||||
Try cleaning up your test environment by executing `make docker-kill` and
|
Try cleaning up your test environment by executing `make docker-kill` and
|
||||||
|
|||||||
91
Godeps
91
Godeps
@@ -1,65 +1,52 @@
|
|||||||
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
|
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
|
||||||
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
|
github.com/Shopify/sarama 159e9990b0796511607dd0d7aaa3eb37d1829d16
|
||||||
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
|
github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81
|
||||||
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
|
github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339
|
||||||
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
|
github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87
|
||||||
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
|
github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f
|
||||||
|
github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
|
||||||
|
github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0
|
||||||
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
|
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
|
||||||
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
|
github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||||
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
|
|
||||||
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
|
||||||
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
|
|
||||||
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
||||||
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
|
github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1
|
||||||
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
|
|
||||||
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
|
|
||||||
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
|
||||||
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
|
||||||
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
|
github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef
|
||||||
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
|
github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05
|
||||||
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
|
github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be
|
||||||
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
|
github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9
|
||||||
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
|
github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b
|
||||||
|
github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a
|
||||||
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
|
||||||
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
|
github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406
|
||||||
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
|
github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458
|
||||||
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294
|
||||||
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
|
github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee
|
||||||
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
|
github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04
|
||||||
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
|
github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
|
||||||
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
|
github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de
|
||||||
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
|
github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9
|
||||||
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
|
||||||
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
|
|
||||||
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
|
|
||||||
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
|
|
||||||
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
|
|
||||||
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
|
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
|
||||||
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
|
|
||||||
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
|
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
|
||||||
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||||
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
|
github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
|
||||||
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
|
github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
|
||||||
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
|
github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655
|
||||||
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4
|
||||||
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
|
github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
|
||||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||||
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
|
github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc
|
||||||
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
|
||||||
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
|
||||||
github.com/shirou/gopsutil 4d0c402af66c78735c5ccf820dc2ca7de5e4ff08
|
github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c
|
||||||
github.com/soniah/gosnmp eb32571c2410868d85849ad67d1e51d01273eb84
|
|
||||||
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
|
||||||
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
|
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
|
||||||
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
|
github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059
|
||||||
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
|
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
|
||||||
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
|
||||||
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
|
golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613
|
||||||
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3
|
||||||
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
|
gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e
|
||||||
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
|
|
||||||
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
|
|
||||||
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
|
|
||||||
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
|
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
|
||||||
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
|
gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49
|
||||||
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
|
gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
|
||||||
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
|
||||||
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
|
||||||
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
|
|
||||||
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
|
||||||
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
|
|
||||||
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
|
||||||
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
|
||||||
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
|
||||||
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
|
|
||||||
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
|
|
||||||
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
|
||||||
@@ -16,7 +16,6 @@
|
|||||||
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||||
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
|
|
||||||
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
|
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
|
||||||
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
||||||
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
||||||
@@ -29,5 +28,6 @@
|
|||||||
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||||
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||||
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||||
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||||
|
- internal Glob function [MIT LICENSE](https://github.com/ryanuber/go-glob/blob/master/LICENSE)
|
||||||
|
|
||||||
72
Makefile
72
Makefile
@@ -1,6 +1,5 @@
|
|||||||
|
UNAME := $(shell sh -c 'uname')
|
||||||
VERSION := $(shell sh -c 'git describe --always --tags')
|
VERSION := $(shell sh -c 'git describe --always --tags')
|
||||||
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
|
|
||||||
COMMIT := $(shell sh -c 'git rev-parse HEAD')
|
|
||||||
ifdef GOBIN
|
ifdef GOBIN
|
||||||
PATH := $(GOBIN):$(PATH)
|
PATH := $(GOBIN):$(PATH)
|
||||||
else
|
else
|
||||||
@@ -10,87 +9,90 @@ endif
|
|||||||
# Standard Telegraf build
|
# Standard Telegraf build
|
||||||
default: prepare build
|
default: prepare build
|
||||||
|
|
||||||
# Windows build
|
|
||||||
windows: prepare-windows build-windows
|
|
||||||
|
|
||||||
# Only run the build (no dependency grabbing)
|
# Only run the build (no dependency grabbing)
|
||||||
build:
|
build:
|
||||||
go install -ldflags \
|
go build -o telegraf -ldflags \
|
||||||
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
|
"-X main.Version=$(VERSION)" \
|
||||||
|
|
||||||
build-windows:
|
|
||||||
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
|
|
||||||
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
|
|
||||||
./cmd/telegraf/telegraf.go
|
./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
build-for-docker:
|
# Build with race detector
|
||||||
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
|
dev: prepare
|
||||||
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
|
go build -race -o telegraf -ldflags \
|
||||||
|
"-X main.Version=$(VERSION)" \
|
||||||
./cmd/telegraf/telegraf.go
|
./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
# run package script
|
# Build linux 64-bit, 32-bit and arm architectures
|
||||||
package:
|
build-linux-bins: prepare
|
||||||
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
|
GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \
|
||||||
|
-ldflags "-X main.Version=$(VERSION)" \
|
||||||
|
./cmd/telegraf/telegraf.go
|
||||||
|
GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \
|
||||||
|
-ldflags "-X main.Version=$(VERSION)" \
|
||||||
|
./cmd/telegraf/telegraf.go
|
||||||
|
GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \
|
||||||
|
-ldflags "-X main.Version=$(VERSION)" \
|
||||||
|
./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
# Get dependencies and use gdm to checkout changesets
|
# Get dependencies and use gdm to checkout changesets
|
||||||
prepare:
|
prepare:
|
||||||
|
go get ./...
|
||||||
go get github.com/sparrc/gdm
|
go get github.com/sparrc/gdm
|
||||||
gdm restore
|
gdm restore
|
||||||
|
|
||||||
# Use the windows godeps file to prepare dependencies
|
|
||||||
prepare-windows:
|
|
||||||
go get github.com/sparrc/gdm
|
|
||||||
gdm restore
|
|
||||||
gdm restore -f Godeps_windows
|
|
||||||
|
|
||||||
# Run all docker containers necessary for unit tests
|
# Run all docker containers necessary for unit tests
|
||||||
docker-run:
|
docker-run:
|
||||||
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
|
ifeq ($(UNAME), Darwin)
|
||||||
|
docker run --name kafka \
|
||||||
|
-e ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \
|
||||||
|
-e ADVERTISED_PORT=9092 \
|
||||||
|
-p "2181:2181" -p "9092:9092" \
|
||||||
|
-d spotify/kafka
|
||||||
|
endif
|
||||||
|
ifeq ($(UNAME), Linux)
|
||||||
docker run --name kafka \
|
docker run --name kafka \
|
||||||
-e ADVERTISED_HOST=localhost \
|
-e ADVERTISED_HOST=localhost \
|
||||||
-e ADVERTISED_PORT=9092 \
|
-e ADVERTISED_PORT=9092 \
|
||||||
-p "2181:2181" -p "9092:9092" \
|
-p "2181:2181" -p "9092:9092" \
|
||||||
-d spotify/kafka
|
-d spotify/kafka
|
||||||
|
endif
|
||||||
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
|
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
|
||||||
docker run --name memcached -p "11211:11211" -d memcached
|
docker run --name memcached -p "11211:11211" -d memcached
|
||||||
docker run --name postgres -p "5432:5432" -d postgres
|
docker run --name postgres -p "5432:5432" -d postgres
|
||||||
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
|
||||||
|
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||||
docker run --name redis -p "6379:6379" -d redis
|
docker run --name redis -p "6379:6379" -d redis
|
||||||
|
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||||
docker run --name nats -p "4222:4222" -d nats
|
|
||||||
|
|
||||||
# Run docker containers necessary for CircleCI unit tests
|
# Run docker containers necessary for CircleCI unit tests
|
||||||
docker-run-circle:
|
docker-run-circle:
|
||||||
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
|
|
||||||
docker run --name kafka \
|
docker run --name kafka \
|
||||||
-e ADVERTISED_HOST=localhost \
|
-e ADVERTISED_HOST=localhost \
|
||||||
-e ADVERTISED_PORT=9092 \
|
-e ADVERTISED_PORT=9092 \
|
||||||
-p "2181:2181" -p "9092:9092" \
|
-p "2181:2181" -p "9092:9092" \
|
||||||
-d spotify/kafka
|
-d spotify/kafka
|
||||||
|
docker run --name opentsdb -p "4242:4242" -d petergrace/opentsdb-docker
|
||||||
|
docker run --name aerospike -p "3000:3000" -d aerospike
|
||||||
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
|
||||||
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
|
||||||
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
docker run --name riemann -p "5555:5555" -d blalor/riemann
|
||||||
docker run --name nats -p "4222:4222" -d nats
|
|
||||||
|
|
||||||
# Kill all docker containers, ignore errors
|
# Kill all docker containers, ignore errors
|
||||||
docker-kill:
|
docker-kill:
|
||||||
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
|
-docker kill nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||||
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
|
-docker rm nsq aerospike redis opentsdb rabbitmq postgres memcached mysql kafka mqtt riemann
|
||||||
|
|
||||||
# Run full unit tests using docker containers (includes setup and teardown)
|
# Run full unit tests using docker containers (includes setup and teardown)
|
||||||
test: vet docker-kill docker-run
|
test: docker-kill docker-run
|
||||||
# Sleeping for kafka leadership election, TSDB setup, etc.
|
# Sleeping for kafka leadership election, TSDB setup, etc.
|
||||||
sleep 60
|
sleep 60
|
||||||
# SUCCESS, running tests
|
# SUCCESS, running tests
|
||||||
go test -race ./...
|
go test -race ./...
|
||||||
|
|
||||||
# Run "short" unit tests
|
# Run "short" unit tests
|
||||||
test-short: vet
|
test-short:
|
||||||
go test -short ./...
|
go test -short ./...
|
||||||
|
|
||||||
vet:
|
.PHONY: test
|
||||||
go vet ./...
|
|
||||||
|
|
||||||
.PHONY: test test-short vet build default
|
|
||||||
|
|||||||
287
README.md
287
README.md
@@ -1,63 +1,48 @@
|
|||||||
# Telegraf [](https://circleci.com/gh/influxdata/telegraf) [](https://hub.docker.com/_/telegraf/)
|
# Telegraf - A native agent for InfluxDB [](https://circleci.com/gh/influxdb/telegraf)
|
||||||
|
|
||||||
Telegraf is an agent written in Go for collecting metrics from the system it's
|
Telegraf is an agent written in Go for collecting metrics from the system it's
|
||||||
running on, or from other services, and writing them into InfluxDB or other
|
running on, or from other services, and writing them into InfluxDB.
|
||||||
[outputs](https://github.com/influxdata/telegraf#supported-output-plugins).
|
|
||||||
|
|
||||||
Design goals are to have a minimal memory footprint with a plugin system so
|
Design goals are to have a minimal memory footprint with a plugin system so
|
||||||
that developers in the community can easily add support for collecting metrics
|
that developers in the community can easily add support for collecting metrics
|
||||||
from well known services (like Hadoop, Postgres, or Redis) and third party
|
from well known services (like Hadoop, Postgres, or Redis) and third party
|
||||||
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
|
||||||
|
|
||||||
New input and output plugins are designed to be easy to contribute,
|
We'll eagerly accept pull requests for new plugins and will manage the set of
|
||||||
we'll eagerly accept pull
|
plugins that Telegraf supports. See the
|
||||||
requests and will manage the set of plugins that Telegraf supports.
|
[contributing guide](CONTRIBUTING.md) for instructions on
|
||||||
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
|
writing new plugins.
|
||||||
new plugins.
|
|
||||||
|
|
||||||
## Installation:
|
## Installation:
|
||||||
|
|
||||||
### Linux deb and rpm Packages:
|
### Linux deb and rpm packages:
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_amd64.deb
|
* http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.x86_64.rpm
|
* http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm
|
||||||
|
|
||||||
Latest (arm):
|
##### Package instructions:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf_1.0.0_armhf.deb
|
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0.armhf.rpm
|
|
||||||
|
|
||||||
##### Package Instructions:
|
* Telegraf binary is installed in `/opt/telegraf/telegraf`
|
||||||
|
* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf`
|
||||||
* Telegraf binary is installed in `/usr/bin/telegraf`
|
|
||||||
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
|
|
||||||
* On sysv systems, the telegraf daemon can be controlled via
|
* On sysv systems, the telegraf daemon can be controlled via
|
||||||
`service telegraf [action]`
|
`service telegraf [action]`
|
||||||
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
|
||||||
controlled via `systemctl [action] telegraf`
|
controlled via `systemctl [action] telegraf`
|
||||||
|
|
||||||
### yum/apt Repositories:
|
### Linux binaries:
|
||||||
|
|
||||||
There is a yum/apt repo available for the whole InfluxData stack, see
|
|
||||||
[here](https://docs.influxdata.com/influxdb/v0.10/introduction/installation/#installation)
|
|
||||||
for instructions on setting up the repo. Once it is configured, you will be able
|
|
||||||
to use this repo to install & update telegraf.
|
|
||||||
|
|
||||||
### Linux tarballs:
|
|
||||||
|
|
||||||
Latest:
|
Latest:
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_amd64.tar.gz
|
* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_i386.tar.gz
|
* http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_linux_armhf.tar.gz
|
* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz
|
||||||
|
|
||||||
### FreeBSD tarball:
|
##### Binary instructions:
|
||||||
|
|
||||||
Latest:
|
These are standalone binaries that can be unpacked and executed on any linux
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_freebsd_amd64.tar.gz
|
system. They can be unpacked and renamed in a location such as
|
||||||
|
`/usr/local/bin` for convenience. A config file will need to be generated,
|
||||||
### Ansible Role:
|
see "How to use it" below.
|
||||||
|
|
||||||
Ansible role: https://github.com/rossmcdonald/telegraf
|
|
||||||
|
|
||||||
### OSX via Homebrew:
|
### OSX via Homebrew:
|
||||||
|
|
||||||
@@ -66,140 +51,87 @@ brew update
|
|||||||
brew install telegraf
|
brew install telegraf
|
||||||
```
|
```
|
||||||
|
|
||||||
### Windows Binaries (EXPERIMENTAL)
|
|
||||||
|
|
||||||
Latest:
|
|
||||||
* https://dl.influxdata.com/telegraf/releases/telegraf-1.0.0_windows_amd64.zip
|
|
||||||
|
|
||||||
### From Source:
|
### From Source:
|
||||||
|
|
||||||
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
|
||||||
which gets installed via the Makefile
|
which gets installed via the Makefile
|
||||||
if you don't have it already. You also must build with golang version 1.5+.
|
if you don't have it already. You also must build with golang version 1.4+.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install)
|
1. [Install Go](https://golang.org/doc/install)
|
||||||
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||||
3. Run `go get github.com/influxdata/telegraf`
|
3. Run `go get github.com/influxdb/telegraf`
|
||||||
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
|
4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
|
||||||
5. Run `make`
|
5. Run `make`
|
||||||
|
|
||||||
## How to use it:
|
### How to use it:
|
||||||
|
|
||||||
```console
|
* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
|
||||||
$ telegraf -help
|
* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`.
|
||||||
Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
to create a config file with only CPU and memory plugins defined, and InfluxDB
|
||||||
|
output defined.
|
||||||
|
* Edit the configuration to match your needs.
|
||||||
|
* Run `telegraf -config telegraf.conf -test` to output one full measurement
|
||||||
|
sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using
|
||||||
|
the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test`
|
||||||
|
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
|
||||||
|
* Run `telegraf -config telegraf.conf -filter system:swap`.
|
||||||
|
to run telegraf with only the system & swap plugins defined in the config.
|
||||||
|
|
||||||
Usage:
|
## Telegraf Options
|
||||||
|
|
||||||
telegraf <flags>
|
Telegraf has a few options you can configure under the `agent` section of the
|
||||||
|
config.
|
||||||
|
|
||||||
The flags are:
|
* **hostname**: The hostname is passed as a tag. By default this will be
|
||||||
|
the value returned by `hostname` on the machine running Telegraf.
|
||||||
-config <file> configuration file to load
|
You can override that value here.
|
||||||
-test gather metrics once, print them to stdout, and exit
|
* **interval**: How often to gather metrics. Uses a simple number +
|
||||||
-sample-config print out full sample configuration to stdout
|
unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
|
||||||
-config-directory directory containing additional *.conf files
|
* **debug**: Set to true to gather and send metrics to STDOUT as well as
|
||||||
-input-filter filter the input plugins to enable, separator is :
|
InfluxDB.
|
||||||
-output-filter filter the output plugins to enable, separator is :
|
|
||||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
|
||||||
-debug print metrics as they're generated to stdout
|
|
||||||
-quiet run in quiet mode
|
|
||||||
-version print the version to stdout
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
# generate a telegraf config file:
|
|
||||||
telegraf -sample-config > telegraf.conf
|
|
||||||
|
|
||||||
# generate config with only cpu input & influxdb output plugins defined
|
|
||||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
|
||||||
|
|
||||||
# run a single telegraf collection, outputing metrics to stdout
|
|
||||||
telegraf -config telegraf.conf -test
|
|
||||||
|
|
||||||
# run telegraf with all plugins defined in config file
|
|
||||||
telegraf -config telegraf.conf
|
|
||||||
|
|
||||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
|
||||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
|
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
|
||||||
configuration options.
|
configuration options.
|
||||||
|
|
||||||
## Supported Input Plugins
|
## Supported Plugins
|
||||||
|
|
||||||
Telegraf currently has support for collecting metrics from many sources. For
|
**You can view usage instructions for each plugin by running**
|
||||||
more information on each, please look at the directory of the same name in
|
`telegraf -usage <pluginname>`.
|
||||||
`plugins/inputs`.
|
|
||||||
|
|
||||||
Currently implemented sources:
|
Telegraf currently has support for collecting metrics from:
|
||||||
|
|
||||||
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cloudwatch)
|
* aerospike
|
||||||
* [aerospike](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aerospike)
|
* apache
|
||||||
* [apache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/apache)
|
* bcache
|
||||||
* [bcache](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bcache)
|
* disque
|
||||||
* [cassandra](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cassandra)
|
* elasticsearch
|
||||||
* [ceph](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ceph)
|
* exec (generic JSON-emitting executable plugin)
|
||||||
* [chrony](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/chrony)
|
* haproxy
|
||||||
* [consul](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/consul)
|
* httpjson (generic JSON-emitting http service plugin)
|
||||||
* [conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack)
|
* influxdb
|
||||||
* [couchbase](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchbase)
|
* jolokia
|
||||||
* [couchdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/couchdb)
|
* leofs
|
||||||
* [disque](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disque)
|
* lustre2
|
||||||
* [dns query time](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dns_query)
|
* mailchimp
|
||||||
* [docker](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker)
|
* memcached
|
||||||
* [dovecot](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dovecot)
|
* mongodb
|
||||||
* [elasticsearch](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/elasticsearch)
|
* mysql
|
||||||
* [exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
* nginx
|
||||||
* [filestat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filestat)
|
* phpfpm
|
||||||
* [haproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/haproxy)
|
* ping
|
||||||
* [hddtemp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/hddtemp)
|
* postgresql
|
||||||
* [http_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_response)
|
* procstat
|
||||||
* [httpjson](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
* prometheus
|
||||||
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/influxdb)
|
* puppetagent
|
||||||
* [ipmi_sensor](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ipmi_sensor)
|
* rabbitmq
|
||||||
* [iptables](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/iptables)
|
* redis
|
||||||
* [jolokia](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia)
|
* rethinkdb
|
||||||
* [leofs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/leofs)
|
* twemproxy
|
||||||
* [lustre2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/lustre2)
|
* zfs
|
||||||
* [mailchimp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mailchimp)
|
* zookeeper
|
||||||
* [memcached](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/memcached)
|
* system
|
||||||
* [mesos](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mesos)
|
|
||||||
* [mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb)
|
|
||||||
* [mysql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql)
|
|
||||||
* [net_response](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/net_response)
|
|
||||||
* [nginx](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nginx)
|
|
||||||
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq)
|
|
||||||
* [nstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nstat)
|
|
||||||
* [ntpq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ntpq)
|
|
||||||
* [phpfpm](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/phpfpm)
|
|
||||||
* [phusion passenger](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/passenger)
|
|
||||||
* [ping](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ping)
|
|
||||||
* [postgresql](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql)
|
|
||||||
* [postgresql_extensible](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/postgresql_extensible)
|
|
||||||
* [powerdns](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/powerdns)
|
|
||||||
* [procstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/procstat)
|
|
||||||
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/prometheus)
|
|
||||||
* [puppetagent](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/puppetagent)
|
|
||||||
* [rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq)
|
|
||||||
* [raindrops](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/raindrops)
|
|
||||||
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
|
|
||||||
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
|
|
||||||
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
|
|
||||||
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
|
|
||||||
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
|
|
||||||
* [snmp_legacy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp_legacy)
|
|
||||||
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
|
|
||||||
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)
|
|
||||||
* [varnish](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/varnish)
|
|
||||||
* [zfs](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zfs)
|
|
||||||
* [zookeeper](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/zookeeper)
|
|
||||||
* [win_perf_counters ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters) (windows performance counters)
|
|
||||||
* [sysstat](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sysstat)
|
|
||||||
* [system](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system)
|
|
||||||
* cpu
|
* cpu
|
||||||
* mem
|
* mem
|
||||||
* net
|
* net
|
||||||
@@ -207,54 +139,33 @@ Currently implemented sources:
|
|||||||
* disk
|
* disk
|
||||||
* diskio
|
* diskio
|
||||||
* swap
|
* swap
|
||||||
* processes
|
|
||||||
* kernel (/proc/stat)
|
|
||||||
* kernel (/proc/vmstat)
|
|
||||||
|
|
||||||
Telegraf can also collect metrics via the following service plugins:
|
## Supported Service Plugins
|
||||||
|
|
||||||
* [http_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/http_listener)
|
Telegraf can collect metrics via the following services:
|
||||||
* [kafka_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer)
|
|
||||||
* [mqtt_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mqtt_consumer)
|
* statsd
|
||||||
* [nats_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nats_consumer)
|
* kafka_consumer
|
||||||
* [nsq_consumer](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nsq_consumer)
|
|
||||||
* [logparser](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/logparser)
|
|
||||||
* [statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd)
|
|
||||||
* [tail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail)
|
|
||||||
* [tcp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tcp_listener)
|
|
||||||
* [udp_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/udp_listener)
|
|
||||||
* [webhooks](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks)
|
|
||||||
* [filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack)
|
|
||||||
* [github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github)
|
|
||||||
* [mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill)
|
|
||||||
* [rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar)
|
|
||||||
|
|
||||||
We'll be adding support for many more over the coming months. Read on if you
|
We'll be adding support for many more over the coming months. Read on if you
|
||||||
want to add support for another service or third-party API.
|
want to add support for another service or third-party API.
|
||||||
|
|
||||||
## Supported Output Plugins
|
## Supported Outputs
|
||||||
|
|
||||||
* [influxdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb)
|
* influxdb
|
||||||
* [amon](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amon)
|
* nsq
|
||||||
* [amqp](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/amqp)
|
* kafka
|
||||||
* [aws kinesis](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kinesis)
|
* datadog
|
||||||
* [aws cloudwatch](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch)
|
* opentsdb
|
||||||
* [datadog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/datadog)
|
* amqp (rabbitmq)
|
||||||
* [file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)
|
* mqtt
|
||||||
* [graphite](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graphite)
|
* librato
|
||||||
* [graylog](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/graylog)
|
* prometheus
|
||||||
* [instrumental](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/instrumental)
|
* amon
|
||||||
* [kafka](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/kafka)
|
* riemann
|
||||||
* [librato](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/librato)
|
|
||||||
* [mqtt](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/mqtt)
|
|
||||||
* [nats](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nats)
|
|
||||||
* [nsq](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/nsq)
|
|
||||||
* [opentsdb](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
|
|
||||||
* [prometheus](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client)
|
|
||||||
* [riemann](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/riemann)
|
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
Please see the
|
Please see the
|
||||||
[contributing guide](CONTRIBUTING.md)
|
[contributing guide](CONTRIBUTING.md)
|
||||||
for details on contributing a plugin to Telegraf.
|
for details on contributing a plugin or output to Telegraf.
|
||||||
|
|||||||
204
accumulator.go
204
accumulator.go
@@ -1,40 +1,184 @@
|
|||||||
package telegraf
|
package telegraf
|
||||||
|
|
||||||
import "time"
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/telegraf/internal/config"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
)
|
||||||
|
|
||||||
// Accumulator is an interface for "accumulating" metrics from input plugin(s).
|
|
||||||
// The metrics are sent down a channel shared between all input plugins and then
|
|
||||||
// flushed on the configured flush_interval.
|
|
||||||
type Accumulator interface {
|
type Accumulator interface {
|
||||||
// AddFields adds a metric to the accumulator with the given measurement
|
Add(measurement string, value interface{},
|
||||||
// name, fields, and tags (and timestamp). If a timestamp is not provided,
|
tags map[string]string, t ...time.Time)
|
||||||
// then the accumulator sets it to "now".
|
AddFields(measurement string, fields map[string]interface{},
|
||||||
// Create a point with a value, decorating it with tags
|
tags map[string]string, t ...time.Time)
|
||||||
// NOTE: tags is expected to be owned by the caller, don't mutate
|
|
||||||
// it after passing to Add.
|
|
||||||
AddFields(measurement string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time)
|
|
||||||
|
|
||||||
// AddGauge is the same as AddFields, but will add the metric as a "Gauge" type
|
SetDefaultTags(tags map[string]string)
|
||||||
AddGauge(measurement string,
|
AddDefaultTag(key, value string)
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time)
|
|
||||||
|
|
||||||
// AddCounter is the same as AddFields, but will add the metric as a "Counter" type
|
Prefix() string
|
||||||
AddCounter(measurement string,
|
SetPrefix(prefix string)
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time)
|
|
||||||
|
|
||||||
AddError(err error)
|
|
||||||
|
|
||||||
Debug() bool
|
Debug() bool
|
||||||
SetDebug(enabled bool)
|
SetDebug(enabled bool)
|
||||||
|
}
|
||||||
SetPrecision(precision, interval time.Duration)
|
|
||||||
|
func NewAccumulator(
|
||||||
DisablePrecision()
|
pluginConfig *config.PluginConfig,
|
||||||
|
points chan *client.Point,
|
||||||
|
) Accumulator {
|
||||||
|
acc := accumulator{}
|
||||||
|
acc.points = points
|
||||||
|
acc.pluginConfig = pluginConfig
|
||||||
|
return &acc
|
||||||
|
}
|
||||||
|
|
||||||
|
type accumulator struct {
|
||||||
|
sync.Mutex
|
||||||
|
|
||||||
|
points chan *client.Point
|
||||||
|
|
||||||
|
defaultTags map[string]string
|
||||||
|
|
||||||
|
debug bool
|
||||||
|
|
||||||
|
pluginConfig *config.PluginConfig
|
||||||
|
|
||||||
|
prefix string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) Add(
|
||||||
|
measurement string,
|
||||||
|
value interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
t ...time.Time,
|
||||||
|
) {
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
fields["value"] = value
|
||||||
|
ac.AddFields(measurement, fields, tags, t...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) AddFields(
|
||||||
|
measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
t ...time.Time,
|
||||||
|
) {
|
||||||
|
if !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override measurement name if set
|
||||||
|
if len(ac.pluginConfig.NameOverride) != 0 {
|
||||||
|
measurement = ac.pluginConfig.NameOverride
|
||||||
|
}
|
||||||
|
// Apply measurement prefix and suffix if set
|
||||||
|
if len(ac.pluginConfig.MeasurementPrefix) != 0 {
|
||||||
|
measurement = ac.pluginConfig.MeasurementPrefix + measurement
|
||||||
|
}
|
||||||
|
if len(ac.pluginConfig.MeasurementSuffix) != 0 {
|
||||||
|
measurement = measurement + ac.pluginConfig.MeasurementSuffix
|
||||||
|
}
|
||||||
|
|
||||||
|
if tags == nil {
|
||||||
|
tags = make(map[string]string)
|
||||||
|
}
|
||||||
|
// Apply plugin-wide tags if set
|
||||||
|
for k, v := range ac.pluginConfig.Tags {
|
||||||
|
if _, ok := tags[k]; !ok {
|
||||||
|
tags[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Apply daemon-wide tags if set
|
||||||
|
for k, v := range ac.defaultTags {
|
||||||
|
if _, ok := tags[k]; !ok {
|
||||||
|
tags[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string]interface{})
|
||||||
|
for k, v := range fields {
|
||||||
|
// Filter out any filtered fields
|
||||||
|
if ac.pluginConfig != nil {
|
||||||
|
if !ac.pluginConfig.Filter.ShouldPass(k) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result[k] = v
|
||||||
|
|
||||||
|
// Validate uint64 and float64 fields
|
||||||
|
switch val := v.(type) {
|
||||||
|
case uint64:
|
||||||
|
// InfluxDB does not support writing uint64
|
||||||
|
if val < uint64(9223372036854775808) {
|
||||||
|
result[k] = int64(val)
|
||||||
|
} else {
|
||||||
|
result[k] = int64(9223372036854775807)
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
// NaNs are invalid values in influxdb, skip measurement
|
||||||
|
if math.IsNaN(val) || math.IsInf(val, 0) {
|
||||||
|
if ac.debug {
|
||||||
|
log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
|
||||||
|
"field, skipping",
|
||||||
|
measurement, k)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fields = nil
|
||||||
|
if len(result) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var timestamp time.Time
|
||||||
|
if len(t) > 0 {
|
||||||
|
timestamp = t[0]
|
||||||
|
} else {
|
||||||
|
timestamp = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if ac.prefix != "" {
|
||||||
|
measurement = ac.prefix + measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
pt, err := client.NewPoint(measurement, tags, result, timestamp)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ac.debug {
|
||||||
|
fmt.Println("> " + pt.String())
|
||||||
|
}
|
||||||
|
ac.points <- pt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) SetDefaultTags(tags map[string]string) {
|
||||||
|
ac.defaultTags = tags
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) AddDefaultTag(key, value string) {
|
||||||
|
ac.defaultTags[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) Prefix() string {
|
||||||
|
return ac.prefix
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) SetPrefix(prefix string) {
|
||||||
|
ac.prefix = prefix
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) Debug() bool {
|
||||||
|
return ac.debug
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ac *accumulator) SetDebug(debug bool) {
|
||||||
|
ac.debug = debug
|
||||||
}
|
}
|
||||||
|
|||||||
397
agent.go
Normal file
397
agent.go
Normal file
@@ -0,0 +1,397 @@
|
|||||||
|
package telegraf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/telegraf/internal/config"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
"github.com/influxdb/telegraf/plugins"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Agent runs telegraf and collects data based on the given config
|
||||||
|
type Agent struct {
|
||||||
|
Config *config.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAgent returns an Agent struct based off the given Config
|
||||||
|
func NewAgent(config *config.Config) (*Agent, error) {
|
||||||
|
a := &Agent{
|
||||||
|
Config: config,
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Config.Agent.Hostname == "" {
|
||||||
|
hostname, err := os.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
a.Config.Agent.Hostname = hostname
|
||||||
|
}
|
||||||
|
|
||||||
|
config.Tags["host"] = a.Config.Agent.Hostname
|
||||||
|
|
||||||
|
return a, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect connects to all configured outputs
|
||||||
|
func (a *Agent) Connect() error {
|
||||||
|
for _, o := range a.Config.Outputs {
|
||||||
|
switch ot := o.Output.(type) {
|
||||||
|
case outputs.ServiceOutput:
|
||||||
|
if err := ot.Start(); err != nil {
|
||||||
|
log.Printf("Service for output %s failed to start, exiting\n%s\n",
|
||||||
|
o.Name, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Config.Agent.Debug {
|
||||||
|
log.Printf("Attempting connection to output: %s\n", o.Name)
|
||||||
|
}
|
||||||
|
err := o.Output.Connect()
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name)
|
||||||
|
time.Sleep(15 * time.Second)
|
||||||
|
err = o.Output.Connect()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if a.Config.Agent.Debug {
|
||||||
|
log.Printf("Successfully connected to output: %s\n", o.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the connection to all configured outputs
|
||||||
|
func (a *Agent) Close() error {
|
||||||
|
var err error
|
||||||
|
for _, o := range a.Config.Outputs {
|
||||||
|
err = o.Output.Close()
|
||||||
|
switch ot := o.Output.(type) {
|
||||||
|
case outputs.ServiceOutput:
|
||||||
|
ot.Stop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// gatherParallel runs the plugins that are using the same reporting interval
|
||||||
|
// as the telegraf agent.
|
||||||
|
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
counter := 0
|
||||||
|
for _, plugin := range a.Config.Plugins {
|
||||||
|
if plugin.Config.Interval != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
counter++
|
||||||
|
go func(plugin *config.RunningPlugin) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
acc := NewAccumulator(plugin.Config, pointChan)
|
||||||
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
// acc.SetPrefix(plugin.Name + "_")
|
||||||
|
acc.SetDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
|
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||||
|
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}(plugin)
|
||||||
|
}
|
||||||
|
|
||||||
|
if counter == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
|
||||||
|
a.Config.Agent.Interval, counter, elapsed)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// gatherSeparate runs the plugins that have been configured with their own
|
||||||
|
// reporting interval.
|
||||||
|
func (a *Agent) gatherSeparate(
|
||||||
|
shutdown chan struct{},
|
||||||
|
plugin *config.RunningPlugin,
|
||||||
|
pointChan chan *client.Point,
|
||||||
|
) error {
|
||||||
|
ticker := time.NewTicker(plugin.Config.Interval)
|
||||||
|
|
||||||
|
for {
|
||||||
|
var outerr error
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
acc := NewAccumulator(plugin.Config, pointChan)
|
||||||
|
acc.SetDebug(a.Config.Agent.Debug)
|
||||||
|
// acc.SetPrefix(plugin.Name + "_")
|
||||||
|
acc.SetDefaultTags(a.Config.Tags)
|
||||||
|
|
||||||
|
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||||
|
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
|
||||||
|
plugin.Config.Interval, plugin.Name, elapsed)
|
||||||
|
|
||||||
|
if outerr != nil {
|
||||||
|
return outerr
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-shutdown:
|
||||||
|
return nil
|
||||||
|
case <-ticker.C:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test verifies that we can 'Gather' from all plugins with their configured
|
||||||
|
// Config struct
|
||||||
|
func (a *Agent) Test() error {
|
||||||
|
shutdown := make(chan struct{})
|
||||||
|
defer close(shutdown)
|
||||||
|
pointChan := make(chan *client.Point)
|
||||||
|
|
||||||
|
// dummy receiver for the point channel
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-pointChan:
|
||||||
|
// do nothing
|
||||||
|
case <-shutdown:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for _, plugin := range a.Config.Plugins {
|
||||||
|
acc := NewAccumulator(plugin.Config, pointChan)
|
||||||
|
acc.SetDebug(true)
|
||||||
|
// acc.SetPrefix(plugin.Name + "_")
|
||||||
|
|
||||||
|
fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
|
||||||
|
if plugin.Config.Interval != 0 {
|
||||||
|
fmt.Printf("* Internal: %s\n", plugin.Config.Interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special instructions for some plugins. cpu, for example, needs to be
|
||||||
|
// run twice in order to return cpu usage percentages.
|
||||||
|
switch plugin.Name {
|
||||||
|
case "cpu", "mongodb":
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name)
|
||||||
|
if err := plugin.Plugin.Gather(acc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeOutput writes a list of points to a single output, with retries.
|
||||||
|
// Optionally takes a `done` channel to indicate that it is done writing.
|
||||||
|
func (a *Agent) writeOutput(
|
||||||
|
points []*client.Point,
|
||||||
|
ro *config.RunningOutput,
|
||||||
|
shutdown chan struct{},
|
||||||
|
wg *sync.WaitGroup,
|
||||||
|
) {
|
||||||
|
defer wg.Done()
|
||||||
|
if len(points) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
retry := 0
|
||||||
|
retries := a.Config.Agent.FlushRetries
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
for {
|
||||||
|
filtered := ro.FilterPoints(points)
|
||||||
|
err := ro.Output.Write(filtered)
|
||||||
|
if err == nil {
|
||||||
|
// Write successful
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
log.Printf("Flushed %d metrics to output %s in %s\n",
|
||||||
|
len(filtered), ro.Name, elapsed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-shutdown:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
if retry >= retries {
|
||||||
|
// No more retries
|
||||||
|
msg := "FATAL: Write to output [%s] failed %d times, dropping" +
|
||||||
|
" %d metrics\n"
|
||||||
|
log.Printf(msg, ro.Name, retries+1, len(points))
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
// Sleep for a retry
|
||||||
|
log.Printf("Error in output [%s]: %s, retrying in %s",
|
||||||
|
ro.Name, err.Error(), a.Config.Agent.FlushInterval.Duration)
|
||||||
|
time.Sleep(a.Config.Agent.FlushInterval.Duration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
retry++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// flush writes a list of points to all configured outputs
|
||||||
|
func (a *Agent) flush(
|
||||||
|
points []*client.Point,
|
||||||
|
shutdown chan struct{},
|
||||||
|
wait bool,
|
||||||
|
) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for _, o := range a.Config.Outputs {
|
||||||
|
wg.Add(1)
|
||||||
|
go a.writeOutput(points, o, shutdown, &wg)
|
||||||
|
}
|
||||||
|
if wait {
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// flusher monitors the points input channel and flushes on the minimum interval
|
||||||
|
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
|
||||||
|
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
||||||
|
// the flusher will flush after metrics are collected.
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
|
||||||
|
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
||||||
|
points := make([]*client.Point, 0)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-shutdown:
|
||||||
|
log.Println("Hang on, flushing any cached points before shutdown")
|
||||||
|
a.flush(points, shutdown, true)
|
||||||
|
return nil
|
||||||
|
case <-ticker.C:
|
||||||
|
a.flush(points, shutdown, false)
|
||||||
|
points = make([]*client.Point, 0)
|
||||||
|
case pt := <-pointChan:
|
||||||
|
points = append(points, pt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// jitterInterval applies the the interval jitter to the flush interval using
|
||||||
|
// crypto/rand number generator
|
||||||
|
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
|
||||||
|
var jitter int64
|
||||||
|
outinterval := ininterval
|
||||||
|
if injitter.Nanoseconds() != 0 {
|
||||||
|
maxjitter := big.NewInt(injitter.Nanoseconds())
|
||||||
|
if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
|
||||||
|
jitter = j.Int64()
|
||||||
|
}
|
||||||
|
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
|
||||||
|
}
|
||||||
|
|
||||||
|
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
|
||||||
|
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
|
||||||
|
outinterval = time.Duration(500 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
return outinterval
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run runs the agent daemon, gathering every Interval
|
||||||
|
func (a *Agent) Run(shutdown chan struct{}) error {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration,
|
||||||
|
a.Config.Agent.FlushJitter.Duration)
|
||||||
|
|
||||||
|
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
|
||||||
|
"Flush Interval:%s\n",
|
||||||
|
a.Config.Agent.Interval, a.Config.Agent.Debug,
|
||||||
|
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)
|
||||||
|
|
||||||
|
// channel shared between all plugin threads for accumulating points
|
||||||
|
pointChan := make(chan *client.Point, 1000)
|
||||||
|
|
||||||
|
// Round collection to nearest interval by sleeping
|
||||||
|
if a.Config.Agent.RoundInterval {
|
||||||
|
i := int64(a.Config.Agent.Interval.Duration)
|
||||||
|
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
|
||||||
|
}
|
||||||
|
ticker := time.NewTicker(a.Config.Agent.Interval.Duration)
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := a.flusher(shutdown, pointChan); err != nil {
|
||||||
|
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
|
||||||
|
close(shutdown)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for _, plugin := range a.Config.Plugins {
|
||||||
|
|
||||||
|
// Start service of any ServicePlugins
|
||||||
|
switch p := plugin.Plugin.(type) {
|
||||||
|
case plugins.ServicePlugin:
|
||||||
|
if err := p.Start(); err != nil {
|
||||||
|
log.Printf("Service for plugin %s failed to start, exiting\n%s\n",
|
||||||
|
plugin.Name, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer p.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special handling for plugins that have their own collection interval
|
||||||
|
// configured. Default intervals are handled below with gatherParallel
|
||||||
|
if plugin.Config.Interval != 0 {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(plugin *config.RunningPlugin) {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
|
||||||
|
log.Printf(err.Error())
|
||||||
|
}
|
||||||
|
}(plugin)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
defer wg.Wait()
|
||||||
|
|
||||||
|
for {
|
||||||
|
if err := a.gatherParallel(pointChan); err != nil {
|
||||||
|
log.Printf(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-shutdown:
|
||||||
|
return nil
|
||||||
|
case <-ticker.C:
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,238 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewAccumulator(
|
|
||||||
inputConfig *models.InputConfig,
|
|
||||||
metrics chan telegraf.Metric,
|
|
||||||
) *accumulator {
|
|
||||||
acc := accumulator{}
|
|
||||||
acc.metrics = metrics
|
|
||||||
acc.inputConfig = inputConfig
|
|
||||||
acc.precision = time.Nanosecond
|
|
||||||
return &acc
|
|
||||||
}
|
|
||||||
|
|
||||||
type accumulator struct {
|
|
||||||
metrics chan telegraf.Metric
|
|
||||||
|
|
||||||
defaultTags map[string]string
|
|
||||||
|
|
||||||
debug bool
|
|
||||||
// print every point added to the accumulator
|
|
||||||
trace bool
|
|
||||||
|
|
||||||
inputConfig *models.InputConfig
|
|
||||||
|
|
||||||
precision time.Duration
|
|
||||||
|
|
||||||
errCount uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *accumulator) AddFields(
|
|
||||||
measurement string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time,
|
|
||||||
) {
|
|
||||||
if m := ac.makeMetric(measurement, fields, tags, telegraf.Untyped, t...); m != nil {
|
|
||||||
ac.metrics <- m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *accumulator) AddGauge(
|
|
||||||
measurement string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time,
|
|
||||||
) {
|
|
||||||
if m := ac.makeMetric(measurement, fields, tags, telegraf.Gauge, t...); m != nil {
|
|
||||||
ac.metrics <- m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *accumulator) AddCounter(
|
|
||||||
measurement string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
t ...time.Time,
|
|
||||||
) {
|
|
||||||
if m := ac.makeMetric(measurement, fields, tags, telegraf.Counter, t...); m != nil {
|
|
||||||
ac.metrics <- m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeMetric either returns a metric, or returns nil if the metric doesn't
// need to be created (because of filtering, an error, etc.)
//
// Processing order matters: name override/prefix/suffix, then plugin tags,
// then daemon default tags (existing keys always win), then the filter,
// then field sanitization, then timestamp rounding. Note that the fields
// and tags maps passed in are mutated in place.
func (ac *accumulator) makeMetric(
	measurement string,
	fields map[string]interface{},
	tags map[string]string,
	mType telegraf.ValueType,
	t ...time.Time,
) telegraf.Metric {
	// Nothing to emit without a measurement name and at least one field.
	if len(fields) == 0 || len(measurement) == 0 {
		return nil
	}
	if tags == nil {
		tags = make(map[string]string)
	}

	// Override measurement name if set
	if len(ac.inputConfig.NameOverride) != 0 {
		measurement = ac.inputConfig.NameOverride
	}
	// Apply measurement prefix and suffix if set
	if len(ac.inputConfig.MeasurementPrefix) != 0 {
		measurement = ac.inputConfig.MeasurementPrefix + measurement
	}
	if len(ac.inputConfig.MeasurementSuffix) != 0 {
		measurement = measurement + ac.inputConfig.MeasurementSuffix
	}

	// Apply plugin-wide tags if set; caller-supplied tags take precedence.
	for k, v := range ac.inputConfig.Tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	// Apply daemon-wide tags if set; existing tags take precedence.
	for k, v := range ac.defaultTags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	// Apply the metric filter(s); a false result drops the whole metric.
	if ok := ac.inputConfig.Filter.Apply(measurement, fields, tags); !ok {
		return nil
	}

	for k, v := range fields {
		// Validate uint64 and float64 fields
		switch val := v.(type) {
		case uint64:
			// InfluxDB does not support writing uint64;
			// values >= 2^63 are clamped to MaxInt64.
			if val < uint64(9223372036854775808) {
				fields[k] = int64(val)
			} else {
				fields[k] = int64(9223372036854775807)
			}
			continue
		case float64:
			// NaNs are invalid values in influxdb, skip measurement
			if math.IsNaN(val) || math.IsInf(val, 0) {
				if ac.debug {
					log.Printf("I! Measurement [%s] field [%s] has a NaN or Inf "+
						"field, skipping",
						measurement, k)
				}
				// Deleting while ranging is safe in Go maps.
				delete(fields, k)
				continue
			}
		}

		// NOTE(review): re-assigning the ranged value back is a no-op for
		// the remaining types; kept as-is.
		fields[k] = v
	}

	var timestamp time.Time
	if len(t) > 0 {
		timestamp = t[0]
	} else {
		timestamp = time.Now()
	}
	// Round the timestamp to the accumulator's configured precision.
	timestamp = timestamp.Round(ac.precision)

	// Construct the concrete metric type requested by the caller.
	var m telegraf.Metric
	var err error
	switch mType {
	case telegraf.Counter:
		m, err = telegraf.NewCounterMetric(measurement, tags, fields, timestamp)
	case telegraf.Gauge:
		m, err = telegraf.NewGaugeMetric(measurement, tags, fields, timestamp)
	default:
		m, err = telegraf.NewMetric(measurement, tags, fields, timestamp)
	}
	if err != nil {
		log.Printf("E! Error adding point [%s]: %s\n", measurement, err.Error())
		return nil
	}

	if ac.trace {
		fmt.Println("> " + m.String())
	}

	return m
}
|
|
||||||
|
|
||||||
// AddError passes a runtime error to the accumulator.
|
|
||||||
// The error will be tagged with the plugin name and written to the log.
|
|
||||||
func (ac *accumulator) AddError(err error) {
|
|
||||||
if err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
atomic.AddUint64(&ac.errCount, 1)
|
|
||||||
//TODO suppress/throttle consecutive duplicate errors?
|
|
||||||
log.Printf("E! Error in input [%s]: %s", ac.inputConfig.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug reports whether debug logging is enabled for this accumulator.
func (ac *accumulator) Debug() bool {
	return ac.debug
}
|
|
||||||
|
|
||||||
// SetDebug enables or disables debug logging for this accumulator.
func (ac *accumulator) SetDebug(debug bool) {
	ac.debug = debug
}
|
|
||||||
|
|
||||||
// Trace reports whether every added point is printed to stdout.
func (ac *accumulator) Trace() bool {
	return ac.trace
}
|
|
||||||
|
|
||||||
// SetTrace enables or disables printing of every added point to stdout.
func (ac *accumulator) SetTrace(trace bool) {
	ac.trace = trace
}
|
|
||||||
|
|
||||||
// SetPrecision takes two time.Duration objects. If the first is non-zero,
|
|
||||||
// it sets that as the precision. Otherwise, it takes the second argument
|
|
||||||
// as the order of time that the metrics should be rounded to, with the
|
|
||||||
// maximum being 1s.
|
|
||||||
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
|
|
||||||
if precision > 0 {
|
|
||||||
ac.precision = precision
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case interval >= time.Second:
|
|
||||||
ac.precision = time.Second
|
|
||||||
case interval >= time.Millisecond:
|
|
||||||
ac.precision = time.Millisecond
|
|
||||||
case interval >= time.Microsecond:
|
|
||||||
ac.precision = time.Microsecond
|
|
||||||
default:
|
|
||||||
ac.precision = time.Nanosecond
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DisablePrecision resets timestamp rounding to full nanosecond resolution.
func (ac *accumulator) DisablePrecision() {
	ac.precision = time.Nanosecond
}
|
|
||||||
|
|
||||||
// setDefaultTags replaces the daemon-wide default tag set. The map is
// stored by reference, not copied.
func (ac *accumulator) setDefaultTags(tags map[string]string) {
	ac.defaultTags = tags
}
|
|
||||||
|
|
||||||
// addDefaultTag sets a single daemon-wide default tag, lazily allocating
// the map on first use.
func (ac *accumulator) addDefaultTag(key, value string) {
	if ac.defaultTags == nil {
		ac.defaultTags = make(map[string]string)
	}
	ac.defaultTags[key] = value
}
|
|
||||||
@@ -1,614 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/internal/models"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestAdd verifies AddFields emits untyped metrics with and without tags,
// and that an explicit timestamp is honored.
func TestAdd(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		actual)
}

// TestAddGauge verifies AddGauge emits metrics typed telegraf.Gauge.
func TestAddGauge(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddGauge("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")
	assert.Equal(t, testm.Type(), telegraf.Gauge)

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")
	assert.Equal(t, testm.Type(), telegraf.Gauge)

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Gauge)
}

// TestAddCounter verifies AddCounter emits metrics typed telegraf.Counter.
func TestAddCounter(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddCounter("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")
	assert.Equal(t, testm.Type(), telegraf.Counter)

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")
	assert.Equal(t, testm.Type(), telegraf.Counter)

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", now.UnixNano()),
		actual)
	assert.Equal(t, testm.Type(), telegraf.Counter)
}

// TestAddNoPrecisionWithInterval verifies that with zero precision the
// interval (1s) drives rounding: the timestamp is rounded to the second.
func TestAddNoPrecisionWithInterval(t *testing.T) {
	a := accumulator{}
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.SetPrecision(0, time.Second)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		actual)
}

// TestAddNoIntervalWithPrecision verifies that a non-zero precision (1s)
// takes priority over the interval argument.
func TestAddNoIntervalWithPrecision(t *testing.T) {
	a := accumulator{}
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.SetPrecision(time.Second, time.Millisecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		actual)
}

// TestAddDisablePrecision verifies DisablePrecision restores full
// nanosecond timestamps after a coarser precision was set.
func TestAddDisablePrecision(t *testing.T) {
	a := accumulator{}
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.SetPrecision(time.Second, time.Millisecond)
	a.DisablePrecision()
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
		actual)
}
|
|
||||||
|
|
||||||
// TestDifferentPrecisions verifies interval-derived rounding at second,
// millisecond, microsecond and nanosecond granularity.
func TestDifferentPrecisions(t *testing.T) {
	a := accumulator{}
	now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.SetPrecision(0, time.Second)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm := <-a.metrics
	actual := testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800000000000)),
		actual)

	a.SetPrecision(0, time.Millisecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800083000000)),
		actual)

	a.SetPrecision(0, time.Microsecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082913000)),
		actual)

	a.SetPrecision(0, time.Nanosecond)
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)
	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test value=101 %d", int64(1139572800082912748)),
		actual)
}

// TestAddDefaultTags verifies daemon-wide default tags are merged into
// every metric without overriding caller-supplied tags.
func TestAddDefaultTags(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

// TestAddFields verifies a shared fields map can be reused across calls.
func TestAddFields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	fields := map[string]interface{}{
		"usage": float64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99 %d", now.UnixNano()),
		actual)
}

// Test that all Inf fields get dropped, and not added to metrics channel
func TestAddInfFields(t *testing.T) {
	inf := math.Inf(1)
	ninf := math.Inf(-1)

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	fields := map[string]interface{}{
		"usage":  inf,
		"nusage": ninf,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-inf fields are kept and not dropped
	fields["notinf"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notinf=100")
}

// Test that nan fields are dropped and not added
func TestAddNaNFields(t *testing.T) {
	nan := math.NaN()

	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	fields := map[string]interface{}{
		"usage": nan,
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	assert.Len(t, a.metrics, 0)

	// test that non-nan fields are kept and not dropped
	fields["notnan"] = float64(100)
	a.AddFields("acctest", fields, map[string]string{})
	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest notnan=100")
}

// TestAddUint64Fields verifies uint64 fields are converted to int64.
func TestAddUint64Fields(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(99),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=99i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=99i %d", now.UnixNano()),
		actual)
}

// TestAddUint64Overflow verifies uint64 values >= 2^63 clamp to MaxInt64.
func TestAddUint64Overflow(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	fields := map[string]interface{}{
		"usage": uint64(9223372036854775808),
	}
	a.AddFields("acctest", fields, map[string]string{})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"})
	a.AddFields("acctest", fields, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test usage=9223372036854775807i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test usage=9223372036854775807i %d", now.UnixNano()),
		actual)
}
|
|
||||||
|
|
||||||
// TestAddInts verifies int, int32 and int64 fields serialize as integers.
func TestAddInts(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": int(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": int32(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": int64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101i")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101i %d", now.UnixNano()),
		actual)
}

// TestAddFloats verifies float32 and float64 fields serialize as floats.
func TestAddFloats(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": float32(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=101 %d", now.UnixNano()),
		actual)
}

// TestAddStrings verifies string fields are quoted in line protocol.
func TestAddStrings(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": "test"},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": "foo"},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=\"test\"")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=\"foo\" %d", now.UnixNano()),
		actual)
}

// TestAddBools verifies boolean fields serialize as true/false.
func TestAddBools(t *testing.T) {
	a := accumulator{}
	a.addDefaultTag("default", "tag")
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	a.inputConfig = &models.InputConfig{}

	a.AddFields("acctest",
		map[string]interface{}{"value": true}, map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": false}, map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest,acc=test,default=tag value=true")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest,acc=test,default=tag value=false %d", now.UnixNano()),
		actual)
}

// Test that tag filters get applied to metrics.
func TestAccFilterTags(t *testing.T) {
	a := accumulator{}
	now := time.Now()
	a.metrics = make(chan telegraf.Metric, 10)
	defer close(a.metrics)
	filter := models.Filter{
		TagExclude: []string{"acc"},
	}
	assert.NoError(t, filter.Compile())
	a.inputConfig = &models.InputConfig{}
	a.inputConfig.Filter = filter

	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"})
	a.AddFields("acctest",
		map[string]interface{}{"value": float64(101)},
		map[string]string{"acc": "test"}, now)

	testm := <-a.metrics
	actual := testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Contains(t, actual, "acctest value=101")

	testm = <-a.metrics
	actual = testm.String()
	assert.Equal(t,
		fmt.Sprintf("acctest value=101 %d", now.UnixNano()),
		actual)
}

// TestAccAddError verifies AddError counts errors and logs them tagged
// with the plugin name. Log output is captured via log.SetOutput.
func TestAccAddError(t *testing.T) {
	errBuf := bytes.NewBuffer(nil)
	log.SetOutput(errBuf)
	defer log.SetOutput(os.Stderr)

	a := accumulator{}
	a.inputConfig = &models.InputConfig{}
	a.inputConfig.Name = "mock_plugin"

	a.AddError(fmt.Errorf("foo"))
	a.AddError(fmt.Errorf("bar"))
	a.AddError(fmt.Errorf("baz"))

	errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
	assert.EqualValues(t, 3, a.errCount)
	require.Len(t, errs, 4) // 4 because of trailing newline
	assert.Contains(t, string(errs[0]), "mock_plugin")
	assert.Contains(t, string(errs[0]), "foo")
	assert.Contains(t, string(errs[1]), "mock_plugin")
	assert.Contains(t, string(errs[1]), "bar")
	assert.Contains(t, string(errs[2]), "mock_plugin")
	assert.Contains(t, string(errs[2]), "baz")
}
|
|
||||||
356
agent/agent.go
356
agent/agent.go
@@ -1,356 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/internal"
|
|
||||||
"github.com/influxdata/telegraf/internal/config"
|
|
||||||
"github.com/influxdata/telegraf/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Agent runs telegraf and collects data based on the given config
type Agent struct {
	// Config holds the full parsed telegraf configuration.
	Config *config.Config
}
|
|
||||||
|
|
||||||
// NewAgent returns an Agent struct based off the given Config.
// Unless hostname reporting is disabled, it resolves the OS hostname when
// none is configured and records it as the global "host" tag.
func NewAgent(config *config.Config) (*Agent, error) {
	a := &Agent{
		Config: config,
	}

	if !a.Config.Agent.OmitHostname {
		if a.Config.Agent.Hostname == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return nil, err
			}

			a.Config.Agent.Hostname = hostname
		}

		// Mutates the shared config's tag map.
		config.Tags["host"] = a.Config.Agent.Hostname
	}

	return a, nil
}
|
|
||||||
|
|
||||||
// Connect connects to all configured outputs. Service outputs are started
// first; a failed connection is retried once after a 15s pause before the
// error is returned.
func (a *Agent) Connect() error {
	for _, o := range a.Config.Outputs {
		o.Quiet = a.Config.Agent.Quiet

		// Service outputs need Start() before Connect().
		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			if err := ot.Start(); err != nil {
				log.Printf("E! Service for output %s failed to start, exiting\n%s\n",
					o.Name, err.Error())
				return err
			}
		}

		log.Printf("D! Attempting connection to output: %s\n", o.Name)
		err := o.Output.Connect()
		if err != nil {
			// One fixed retry after 15s before giving up.
			log.Printf("E! Failed to connect to output %s, retrying in 15s, "+
				"error was '%s' \n", o.Name, err)
			time.Sleep(15 * time.Second)
			err = o.Output.Connect()
			if err != nil {
				return err
			}
		}
		log.Printf("D! Successfully connected to output: %s\n", o.Name)
	}
	return nil
}
|
|
||||||
|
|
||||||
// Close closes the connection to all configured outputs and stops any
// service outputs.
//
// NOTE(review): only the error from the LAST output's Close is returned;
// earlier errors are overwritten. Presumably acceptable during shutdown —
// confirm before relying on the returned error.
func (a *Agent) Close() error {
	var err error
	for _, o := range a.Config.Outputs {
		err = o.Output.Close()
		switch ot := o.Output.(type) {
		case telegraf.ServiceOutput:
			ot.Stop()
		}
	}
	return err
}
|
|
||||||
|
|
||||||
// panicRecover is deferred around input gathering: it recovers from a
// panicking input plugin, logging the panic value and a (truncated, 2048
// byte) stack trace of all goroutines instead of crashing the agent.
func panicRecover(input *models.RunningInput) {
	if err := recover(); err != nil {
		trace := make([]byte, 2048)
		runtime.Stack(trace, true)
		log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n",
			input.Name, err, trace)
		log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " +
			"stack trace, configuration, and OS information: " +
			"https://github.com/influxdata/telegraf/issues/new")
	}
}
|
|
||||||
|
|
||||||
// gatherer runs the inputs that have been configured with their own
|
|
||||||
// reporting interval.
|
|
||||||
func (a *Agent) gatherer(
|
|
||||||
shutdown chan struct{},
|
|
||||||
input *models.RunningInput,
|
|
||||||
interval time.Duration,
|
|
||||||
metricC chan telegraf.Metric,
|
|
||||||
) error {
|
|
||||||
defer panicRecover(input)
|
|
||||||
|
|
||||||
ticker := time.NewTicker(interval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
var outerr error
|
|
||||||
|
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
|
||||||
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
|
||||||
a.Config.Agent.Interval.Duration)
|
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
|
||||||
|
|
||||||
internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown)
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
gatherWithTimeout(shutdown, input, acc, interval)
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
|
|
||||||
if outerr != nil {
|
|
||||||
return outerr
|
|
||||||
}
|
|
||||||
log.Printf("D! Input [%s] gathered metrics, (%s interval) in %s\n",
|
|
||||||
input.Name, interval, elapsed)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
return nil
|
|
||||||
case <-ticker.C:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// gatherWithTimeout gathers from the given input, with the given timeout.
|
|
||||||
// when the given timeout is reached, gatherWithTimeout logs an error message
|
|
||||||
// but continues waiting for it to return. This is to avoid leaving behind
|
|
||||||
// hung processes, and to prevent re-calling the same hung process over and
|
|
||||||
// over.
|
|
||||||
func gatherWithTimeout(
|
|
||||||
shutdown chan struct{},
|
|
||||||
input *models.RunningInput,
|
|
||||||
acc *accumulator,
|
|
||||||
timeout time.Duration,
|
|
||||||
) {
|
|
||||||
ticker := time.NewTicker(timeout)
|
|
||||||
defer ticker.Stop()
|
|
||||||
done := make(chan error)
|
|
||||||
go func() {
|
|
||||||
done <- input.Input.Gather(acc)
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err := <-done:
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("E! ERROR in input [%s]: %s", input.Name, err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
log.Printf("E! ERROR: input [%s] took longer to collect than "+
|
|
||||||
"collection interval (%s)",
|
|
||||||
input.Name, timeout)
|
|
||||||
continue
|
|
||||||
case <-shutdown:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test verifies that we can 'Gather' from all inputs with their configured
|
|
||||||
// Config struct
|
|
||||||
func (a *Agent) Test() error {
|
|
||||||
shutdown := make(chan struct{})
|
|
||||||
defer close(shutdown)
|
|
||||||
metricC := make(chan telegraf.Metric)
|
|
||||||
|
|
||||||
// dummy receiver for the point channel
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-metricC:
|
|
||||||
// do nothing
|
|
||||||
case <-shutdown:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for _, input := range a.Config.Inputs {
|
|
||||||
acc := NewAccumulator(input.Config, metricC)
|
|
||||||
acc.SetTrace(true)
|
|
||||||
acc.SetPrecision(a.Config.Agent.Precision.Duration,
|
|
||||||
a.Config.Agent.Interval.Duration)
|
|
||||||
acc.setDefaultTags(a.Config.Tags)
|
|
||||||
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
|
|
||||||
if input.Config.Interval != 0 {
|
|
||||||
fmt.Printf("* Internal: %s\n", input.Config.Interval)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := input.Input.Gather(acc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if acc.errCount > 0 {
|
|
||||||
return fmt.Errorf("Errors encountered during processing")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special instructions for some inputs. cpu, for example, needs to be
|
|
||||||
// run twice in order to return cpu usage percentages.
|
|
||||||
switch input.Name {
|
|
||||||
case "cpu", "mongodb", "procstat":
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
|
|
||||||
if err := input.Input.Gather(acc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// flush writes a list of metrics to all configured outputs
|
|
||||||
func (a *Agent) flush() {
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
wg.Add(len(a.Config.Outputs))
|
|
||||||
for _, o := range a.Config.Outputs {
|
|
||||||
go func(output *models.RunningOutput) {
|
|
||||||
defer wg.Done()
|
|
||||||
err := output.Write()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("E! Error writing to output [%s]: %s\n",
|
|
||||||
output.Name, err.Error())
|
|
||||||
}
|
|
||||||
}(o)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// flusher monitors the metrics input channel and flushes on the minimum interval
|
|
||||||
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
|
|
||||||
// Inelegant, but this sleep is to allow the Gather threads to run, so that
|
|
||||||
// the flusher will flush after metrics are collected.
|
|
||||||
time.Sleep(time.Millisecond * 200)
|
|
||||||
|
|
||||||
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-shutdown:
|
|
||||||
log.Println("I! Hang on, flushing any cached metrics before shutdown")
|
|
||||||
a.flush()
|
|
||||||
return nil
|
|
||||||
case <-ticker.C:
|
|
||||||
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
|
|
||||||
a.flush()
|
|
||||||
case m := <-metricC:
|
|
||||||
for i, o := range a.Config.Outputs {
|
|
||||||
if i == len(a.Config.Outputs)-1 {
|
|
||||||
o.AddMetric(m)
|
|
||||||
} else {
|
|
||||||
o.AddMetric(copyMetric(m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func copyMetric(m telegraf.Metric) telegraf.Metric {
|
|
||||||
t := time.Time(m.Time())
|
|
||||||
|
|
||||||
tags := make(map[string]string)
|
|
||||||
fields := make(map[string]interface{})
|
|
||||||
for k, v := range m.Tags() {
|
|
||||||
tags[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range m.Fields() {
|
|
||||||
fields[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
out, _ := telegraf.NewMetric(m.Name(), tags, fields, t)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run runs the agent daemon, gathering every Interval.
//
// It starts all service inputs (stopping them on return), optionally sleeps
// to align collection with the interval boundary, then launches one flusher
// goroutine plus one gatherer goroutine per input, and blocks until all of
// them exit (which happens when the shutdown channel is closed).
func (a *Agent) Run(shutdown chan struct{}) error {
	var wg sync.WaitGroup

	log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+
		"Flush Interval:%s \n",
		a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet,
		a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)

	// channel shared between all input threads for accumulating metrics
	metricC := make(chan telegraf.Metric, 10000)

	for _, input := range a.Config.Inputs {
		// Start service of any ServicePlugins
		switch p := input.Input.(type) {
		case telegraf.ServiceInput:
			acc := NewAccumulator(input.Config, metricC)
			// Service input plugins should set their own precision of their
			// metrics.
			acc.DisablePrecision()
			acc.setDefaultTags(a.Config.Tags)
			if err := p.Start(acc); err != nil {
				log.Printf("E! Service for input %s failed to start, exiting\n%s\n",
					input.Name, err.Error())
				return err
			}
			// Deferred inside the loop deliberately: every started service
			// input is stopped when Run returns.
			defer p.Stop()
		}
	}

	// Round collection to nearest interval by sleeping
	if a.Config.Agent.RoundInterval {
		i := int64(a.Config.Agent.Interval.Duration)
		time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		// A flusher failure closes shutdown so all gatherers also exit.
		if err := a.flusher(shutdown, metricC); err != nil {
			log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
			close(shutdown)
		}
	}()

	wg.Add(len(a.Config.Inputs))
	for _, input := range a.Config.Inputs {
		interval := a.Config.Agent.Interval.Duration
		// overwrite global interval if this plugin has its own.
		if input.Config.Interval != 0 {
			interval = input.Config.Interval
		}
		// input and interval are passed as arguments so each goroutine
		// captures its own values rather than the shared loop variables.
		go func(in *models.RunningInput, interv time.Duration) {
			defer wg.Done()
			if err := a.gatherer(shutdown, in, interv, metricC); err != nil {
				log.Printf("E! " + err.Error())
			}
		}(input, interval)
	}

	wg.Wait()
	return nil
}
|
|
||||||
@@ -1,111 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/internal/config"
|
|
||||||
|
|
||||||
// needing to load the plugins
|
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
|
||||||
// needing to load the outputs
|
|
||||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAgent_OmitHostname(t *testing.T) {
|
|
||||||
c := config.NewConfig()
|
|
||||||
c.Agent.OmitHostname = true
|
|
||||||
_, err := NewAgent(c)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotContains(t, c.Tags, "host")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_LoadPlugin(t *testing.T) {
|
|
||||||
c := config.NewConfig()
|
|
||||||
c.InputFilters = []string{"mysql"}
|
|
||||||
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ := NewAgent(c)
|
|
||||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.InputFilters = []string{"foo"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 0, len(a.Config.Inputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.InputFilters = []string{"mysql", "foo"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.InputFilters = []string{"mysql", "redis"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAgent_LoadOutput(t *testing.T) {
|
|
||||||
c := config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"influxdb"}
|
|
||||||
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ := NewAgent(c)
|
|
||||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"kafka"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 1, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"foo"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 0, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"influxdb", "foo"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"influxdb", "kafka"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 3, len(c.Outputs))
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
|
||||||
|
|
||||||
c = config.NewConfig()
|
|
||||||
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
|
||||||
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
a, _ = NewAgent(c)
|
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
|
||||||
}
|
|
||||||
156
agent_test.go
Normal file
156
agent_test.go
Normal file
@@ -0,0 +1,156 @@
|
|||||||
|
package telegraf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/telegraf/internal/config"
|
||||||
|
|
||||||
|
// needing to load the plugins
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/all"
|
||||||
|
// needing to load the outputs
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/all"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAgent_LoadPlugin(t *testing.T) {
|
||||||
|
c := config.NewConfig()
|
||||||
|
c.PluginFilters = []string{"mysql"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ := NewAgent(c)
|
||||||
|
assert.Equal(t, 1, len(a.Config.Plugins))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.PluginFilters = []string{"foo"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 0, len(a.Config.Plugins))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.PluginFilters = []string{"mysql", "foo"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 1, len(a.Config.Plugins))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.PluginFilters = []string{"mysql", "redis"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 2, len(a.Config.Plugins))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.PluginFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 2, len(a.Config.Plugins))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_LoadOutput(t *testing.T) {
|
||||||
|
c := config.NewConfig()
|
||||||
|
c.OutputFilters = []string{"influxdb"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ := NewAgent(c)
|
||||||
|
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = []string{}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = []string{"foo"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 0, len(a.Config.Outputs))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = []string{"influxdb", "foo"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = []string{"influxdb", "kafka"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
|
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
||||||
|
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
|
||||||
|
a, _ = NewAgent(c)
|
||||||
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_ZeroJitter(t *testing.T) {
|
||||||
|
flushinterval := jitterInterval(time.Duration(10*time.Second),
|
||||||
|
time.Duration(0*time.Second))
|
||||||
|
|
||||||
|
actual := flushinterval.Nanoseconds()
|
||||||
|
exp := time.Duration(10 * time.Second).Nanoseconds()
|
||||||
|
|
||||||
|
if actual != exp {
|
||||||
|
t.Errorf("Actual %v, expected %v", actual, exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_ZeroInterval(t *testing.T) {
|
||||||
|
min := time.Duration(500 * time.Millisecond).Nanoseconds()
|
||||||
|
max := time.Duration(5 * time.Second).Nanoseconds()
|
||||||
|
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
||||||
|
time.Duration(5*time.Second))
|
||||||
|
actual := flushinterval.Nanoseconds()
|
||||||
|
|
||||||
|
if actual > max {
|
||||||
|
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if actual < min {
|
||||||
|
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_ZeroBoth(t *testing.T) {
|
||||||
|
flushinterval := jitterInterval(time.Duration(0*time.Second),
|
||||||
|
time.Duration(0*time.Second))
|
||||||
|
|
||||||
|
actual := flushinterval
|
||||||
|
exp := time.Duration(500 * time.Millisecond)
|
||||||
|
|
||||||
|
if actual != exp {
|
||||||
|
t.Errorf("Actual %v, expected %v", actual, exp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_JitterMax(t *testing.T) {
|
||||||
|
max := time.Duration(32 * time.Second).Nanoseconds()
|
||||||
|
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
||||||
|
time.Duration(2*time.Second))
|
||||||
|
actual := flushinterval.Nanoseconds()
|
||||||
|
if actual > max {
|
||||||
|
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_JitterMin(t *testing.T) {
|
||||||
|
min := time.Duration(30 * time.Second).Nanoseconds()
|
||||||
|
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
flushinterval := jitterInterval(time.Duration(30*time.Second),
|
||||||
|
time.Duration(2*time.Second))
|
||||||
|
actual := flushinterval.Nanoseconds()
|
||||||
|
if actual < min {
|
||||||
|
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
11
circle.yml
11
circle.yml
@@ -4,17 +4,16 @@ machine:
|
|||||||
post:
|
post:
|
||||||
- sudo service zookeeper stop
|
- sudo service zookeeper stop
|
||||||
- go version
|
- go version
|
||||||
- go version | grep 1.7.1 || sudo rm -rf /usr/local/go
|
- go version | grep 1.5.1 || sudo rm -rf /usr/local/go
|
||||||
- wget https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz
|
- wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
|
||||||
- sudo tar -C /usr/local -xzf go1.7.1.linux-amd64.tar.gz
|
- sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz
|
||||||
- go version
|
- go version
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
|
cache_directories:
|
||||||
|
- "~/telegraf-build/src"
|
||||||
override:
|
override:
|
||||||
- docker info
|
- docker info
|
||||||
post:
|
|
||||||
- gem install fpm
|
|
||||||
- sudo apt-get install -y rpm python-boto
|
|
||||||
|
|
||||||
test:
|
test:
|
||||||
override:
|
override:
|
||||||
|
|||||||
@@ -6,324 +6,147 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/agent"
|
"github.com/influxdb/telegraf"
|
||||||
"github.com/influxdata/telegraf/internal/config"
|
"github.com/influxdb/telegraf/internal/config"
|
||||||
"github.com/influxdata/telegraf/logger"
|
_ "github.com/influxdb/telegraf/outputs/all"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
_ "github.com/influxdb/telegraf/plugins/all"
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
|
||||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
|
||||||
|
|
||||||
"github.com/kardianos/service"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var fDebug = flag.Bool("debug", false,
|
var fDebug = flag.Bool("debug", false,
|
||||||
"turn on debug logging")
|
"show metrics as they're generated to stdout")
|
||||||
var fQuiet = flag.Bool("quiet", false,
|
|
||||||
"run in quiet mode")
|
|
||||||
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
||||||
var fConfig = flag.String("config", "", "configuration file to load")
|
var fConfig = flag.String("config", "", "configuration file to load")
|
||||||
var fConfigDirectory = flag.String("config-directory", "",
|
var fConfigDirectory = flag.String("configdirectory", "",
|
||||||
"directory containing additional *.conf files")
|
"directory containing additional *.conf files")
|
||||||
var fVersion = flag.Bool("version", false, "display the version")
|
var fVersion = flag.Bool("version", false, "display the version")
|
||||||
var fSampleConfig = flag.Bool("sample-config", false,
|
var fSampleConfig = flag.Bool("sample-config", false,
|
||||||
"print out full sample configuration")
|
"print out full sample configuration")
|
||||||
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
||||||
var fInputFilters = flag.String("input-filter", "",
|
var fPLuginFilters = flag.String("filter", "",
|
||||||
"filter the inputs to enable, separator is :")
|
"filter the plugins to enable, separator is :")
|
||||||
var fInputList = flag.Bool("input-list", false,
|
var fOutputFilters = flag.String("outputfilter", "",
|
||||||
"print available input plugins.")
|
|
||||||
var fOutputFilters = flag.String("output-filter", "",
|
|
||||||
"filter the outputs to enable, separator is :")
|
"filter the outputs to enable, separator is :")
|
||||||
var fOutputList = flag.Bool("output-list", false,
|
|
||||||
"print available output plugins.")
|
|
||||||
var fUsage = flag.String("usage", "",
|
var fUsage = flag.String("usage", "",
|
||||||
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
"print usage for a plugin, ie, 'telegraf -usage mysql'")
|
||||||
var fService = flag.String("service", "",
|
|
||||||
"operate on the service")
|
|
||||||
|
|
||||||
// Telegraf version, populated linker.
|
// Telegraf version
|
||||||
// ie, -ldflags "-X main.version=`git describe --always --tags`"
|
// -ldflags "-X main.Version=`git describe --always --tags`"
|
||||||
var (
|
var Version string
|
||||||
version string
|
|
||||||
commit string
|
|
||||||
branch string
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// If commit or branch are not set, make that clear.
|
|
||||||
if commit == "" {
|
|
||||||
commit = "unknown"
|
|
||||||
}
|
|
||||||
if branch == "" {
|
|
||||||
branch = "unknown"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
telegraf <flags>
|
|
||||||
|
|
||||||
The flags are:
|
|
||||||
|
|
||||||
-config <file> configuration file to load
|
|
||||||
-test gather metrics once, print them to stdout, and exit
|
|
||||||
-sample-config print out full sample configuration to stdout
|
|
||||||
-config-directory directory containing additional *.conf files
|
|
||||||
-input-filter filter the input plugins to enable, separator is :
|
|
||||||
-input-list print all the plugins inputs
|
|
||||||
-output-filter filter the output plugins to enable, separator is :
|
|
||||||
-output-list print all the available outputs
|
|
||||||
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
|
|
||||||
-debug print metrics as they're generated to stdout
|
|
||||||
-quiet run in quiet mode
|
|
||||||
-version print the version to stdout
|
|
||||||
-service Control the service, ie, 'telegraf -service install (windows only)'
|
|
||||||
|
|
||||||
In addition to the -config flag, telegraf will also load the config file from
|
|
||||||
an environment variable or default location. Precedence is:
|
|
||||||
1. -config flag
|
|
||||||
2. $TELEGRAF_CONFIG_PATH environment variable
|
|
||||||
3. $HOME/.telegraf/telegraf.conf
|
|
||||||
4. /etc/telegraf/telegraf.conf
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
# generate a telegraf config file:
|
|
||||||
telegraf -sample-config > telegraf.conf
|
|
||||||
|
|
||||||
# generate config with only cpu input & influxdb output plugins defined
|
|
||||||
telegraf -sample-config -input-filter cpu -output-filter influxdb
|
|
||||||
|
|
||||||
# run a single telegraf collection, outputing metrics to stdout
|
|
||||||
telegraf -config telegraf.conf -test
|
|
||||||
|
|
||||||
# run telegraf with all plugins defined in config file
|
|
||||||
telegraf -config telegraf.conf
|
|
||||||
|
|
||||||
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
|
||||||
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
|
|
||||||
`
|
|
||||||
|
|
||||||
var stop chan struct{}
|
|
||||||
|
|
||||||
var srvc service.Service
|
|
||||||
|
|
||||||
type program struct{}
|
|
||||||
|
|
||||||
func reloadLoop(stop chan struct{}, s service.Service) {
|
|
||||||
defer func() {
|
|
||||||
if service.Interactive() {
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}()
|
|
||||||
reload := make(chan bool, 1)
|
|
||||||
reload <- true
|
|
||||||
for <-reload {
|
|
||||||
reload <- false
|
|
||||||
flag.Usage = func() { usageExit(0) }
|
|
||||||
flag.Parse()
|
|
||||||
args := flag.Args()
|
|
||||||
|
|
||||||
var inputFilters []string
|
|
||||||
if *fInputFilters != "" {
|
|
||||||
inputFilter := strings.TrimSpace(*fInputFilters)
|
|
||||||
inputFilters = strings.Split(":"+inputFilter+":", ":")
|
|
||||||
}
|
|
||||||
var outputFilters []string
|
|
||||||
if *fOutputFilters != "" {
|
|
||||||
outputFilter := strings.TrimSpace(*fOutputFilters)
|
|
||||||
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(args) > 0 {
|
|
||||||
switch args[0] {
|
|
||||||
case "version":
|
|
||||||
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
|
|
||||||
return
|
|
||||||
case "config":
|
|
||||||
config.PrintSampleConfig(inputFilters, outputFilters)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// switch for flags which just do something and exit immediately
|
|
||||||
switch {
|
|
||||||
case *fOutputList:
|
|
||||||
fmt.Println("Available Output Plugins:")
|
|
||||||
for k, _ := range outputs.Outputs {
|
|
||||||
fmt.Printf(" %s\n", k)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case *fInputList:
|
|
||||||
fmt.Println("Available Input Plugins:")
|
|
||||||
for k, _ := range inputs.Inputs {
|
|
||||||
fmt.Printf(" %s\n", k)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case *fVersion:
|
|
||||||
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
|
|
||||||
return
|
|
||||||
case *fSampleConfig:
|
|
||||||
config.PrintSampleConfig(inputFilters, outputFilters)
|
|
||||||
return
|
|
||||||
case *fUsage != "":
|
|
||||||
if err := config.PrintInputConfig(*fUsage); err != nil {
|
|
||||||
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
|
||||||
log.Fatalf("%s and %s", err, err2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no other options are specified, load the config file and run.
|
|
||||||
c := config.NewConfig()
|
|
||||||
c.OutputFilters = outputFilters
|
|
||||||
c.InputFilters = inputFilters
|
|
||||||
err := c.LoadConfig(*fConfig)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if *fConfigDirectory != "" {
|
|
||||||
err = c.LoadDirectory(*fConfigDirectory)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(c.Outputs) == 0 {
|
|
||||||
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
|
||||||
}
|
|
||||||
if len(c.Inputs) == 0 {
|
|
||||||
log.Fatalf("Error: no inputs found, did you provide a valid config file?")
|
|
||||||
}
|
|
||||||
|
|
||||||
ag, err := agent.NewAgent(c)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup logging
|
|
||||||
logger.SetupLogging(
|
|
||||||
ag.Config.Agent.Debug || *fDebug,
|
|
||||||
ag.Config.Agent.Quiet || *fQuiet,
|
|
||||||
ag.Config.Agent.Logfile,
|
|
||||||
)
|
|
||||||
|
|
||||||
if *fTest {
|
|
||||||
err = ag.Test()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = ag.Connect()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
shutdown := make(chan struct{})
|
|
||||||
signals := make(chan os.Signal)
|
|
||||||
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case sig := <-signals:
|
|
||||||
if sig == os.Interrupt {
|
|
||||||
close(shutdown)
|
|
||||||
}
|
|
||||||
if sig == syscall.SIGHUP {
|
|
||||||
log.Printf("I! Reloading Telegraf config\n")
|
|
||||||
<-reload
|
|
||||||
reload <- true
|
|
||||||
close(shutdown)
|
|
||||||
}
|
|
||||||
case <-stop:
|
|
||||||
close(shutdown)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
log.Printf("I! Starting Telegraf (version %s)\n", version)
|
|
||||||
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
|
||||||
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
|
||||||
log.Printf("I! Tags enabled: %s", c.ListTags())
|
|
||||||
|
|
||||||
if *fPidfile != "" {
|
|
||||||
f, err := os.Create(*fPidfile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Unable to create pidfile: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(f, "%d\n", os.Getpid())
|
|
||||||
|
|
||||||
f.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
ag.Run(shutdown)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func usageExit(rc int) {
|
|
||||||
fmt.Println(usage)
|
|
||||||
os.Exit(rc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *program) Start(s service.Service) error {
|
|
||||||
srvc = s
|
|
||||||
go p.run()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (p *program) run() {
|
|
||||||
stop = make(chan struct{})
|
|
||||||
reloadLoop(stop, srvc)
|
|
||||||
}
|
|
||||||
func (p *program) Stop(s service.Service) error {
|
|
||||||
close(stop)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
svcConfig := &service.Config{
|
|
||||||
Name: "telegraf",
|
|
||||||
DisplayName: "Telegraf Data Collector Service",
|
|
||||||
Description: "Collects data using a series of plugins and publishes it to" +
|
|
||||||
"another series of plugins.",
|
|
||||||
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
|
|
||||||
}
|
|
||||||
|
|
||||||
prg := &program{}
|
var pluginFilters []string
|
||||||
s, err := service.New(prg, svcConfig)
|
if *fPLuginFilters != "" {
|
||||||
|
pluginsFilter := strings.TrimSpace(*fPLuginFilters)
|
||||||
|
pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
var outputFilters []string
|
||||||
|
if *fOutputFilters != "" {
|
||||||
|
outputFilter := strings.TrimSpace(*fOutputFilters)
|
||||||
|
outputFilters = strings.Split(":"+outputFilter+":", ":")
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fVersion {
|
||||||
|
v := fmt.Sprintf("Telegraf - Version %s", Version)
|
||||||
|
fmt.Println(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fSampleConfig {
|
||||||
|
config.PrintSampleConfig(pluginFilters, outputFilters)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fUsage != "" {
|
||||||
|
if err := config.PrintPluginConfig(*fUsage); err != nil {
|
||||||
|
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
|
||||||
|
log.Fatalf("%s and %s", err, err2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
c *config.Config
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
if *fConfig != "" {
|
||||||
|
c = config.NewConfig()
|
||||||
|
c.OutputFilters = outputFilters
|
||||||
|
c.PluginFilters = pluginFilters
|
||||||
|
err = c.LoadConfig(*fConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
// Handle the -service flag here to prevent any issues with tooling that
|
|
||||||
// may not have an interactive session, e.g. installing from Ansible.
|
|
||||||
if *fService != "" {
|
|
||||||
if *fConfig != "" {
|
|
||||||
(*svcConfig).Arguments = []string{"-config", *fConfig}
|
|
||||||
}
|
|
||||||
err := service.Control(s, *fService)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = s.Run()
|
|
||||||
if err != nil {
|
|
||||||
log.Println("E! " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
stop = make(chan struct{})
|
fmt.Println("Usage: Telegraf")
|
||||||
reloadLoop(stop, nil)
|
flag.PrintDefaults()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if *fConfigDirectory != "" {
|
||||||
|
err = c.LoadDirectory(*fConfigDirectory)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(c.Outputs) == 0 {
|
||||||
|
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
|
||||||
|
}
|
||||||
|
if len(c.Plugins) == 0 {
|
||||||
|
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
|
||||||
|
}
|
||||||
|
|
||||||
|
ag, err := telegraf.NewAgent(c)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fDebug {
|
||||||
|
ag.Config.Agent.Debug = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if *fTest {
|
||||||
|
err = ag.Test()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ag.Connect()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
shutdown := make(chan struct{})
|
||||||
|
signals := make(chan os.Signal)
|
||||||
|
signal.Notify(signals, os.Interrupt)
|
||||||
|
go func() {
|
||||||
|
<-signals
|
||||||
|
close(shutdown)
|
||||||
|
}()
|
||||||
|
|
||||||
|
log.Printf("Starting Telegraf (version %s)\n", Version)
|
||||||
|
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||||
|
log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " "))
|
||||||
|
log.Printf("Tags enabled: %s", c.ListTags())
|
||||||
|
|
||||||
|
if *fPidfile != "" {
|
||||||
|
f, err := os.Create(*fPidfile)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Unable to create pidfile: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(f, "%d\n", os.Getpid())
|
||||||
|
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
ag.Run(shutdown)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,285 +0,0 @@
|
|||||||
# Telegraf Configuration
|
|
||||||
|
|
||||||
## Generating a Configuration File
|
|
||||||
|
|
||||||
A default Telegraf config file can be generated using the -sample-config flag:
|
|
||||||
|
|
||||||
```
|
|
||||||
telegraf -sample-config > telegraf.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
To generate a file with specific inputs and outputs, you can use the
|
|
||||||
-input-filter and -output-filter flags:
|
|
||||||
|
|
||||||
```
|
|
||||||
telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka
|
|
||||||
```
|
|
||||||
|
|
||||||
You can see the latest config file with all available plugins here:
|
|
||||||
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
|
|
||||||
|
|
||||||
## Environment Variables
|
|
||||||
|
|
||||||
Environment variables can be used anywhere in the config file, simply prepend
|
|
||||||
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
|
|
||||||
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
|
|
||||||
|
|
||||||
## `[global_tags]` Configuration
|
|
||||||
|
|
||||||
Global tags can be specified in the `[global_tags]` section of the config file
|
|
||||||
in key="value" format. All metrics being gathered on this host will be tagged
|
|
||||||
with the tags specified here.
|
|
||||||
|
|
||||||
## `[agent]` Configuration
|
|
||||||
|
|
||||||
Telegraf has a few options you can configure under the `agent` section of the
|
|
||||||
config.
|
|
||||||
|
|
||||||
* **interval**: Default data collection interval for all inputs
|
|
||||||
* **round_interval**: Rounds collection interval to 'interval'
|
|
||||||
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
|
||||||
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
|
|
||||||
most metric_batch_size metrics.
|
|
||||||
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
|
|
||||||
for each output, and will flush this buffer on a successful write.
|
|
||||||
This should be a multiple of metric_batch_size and could not be less
|
|
||||||
than 2 times metric_batch_size.
|
|
||||||
* **collection_jitter**: Collection jitter is used to jitter
|
|
||||||
the collection by a random amount.
|
|
||||||
Each plugin will sleep for a random time within jitter before collecting.
|
|
||||||
This can be used to avoid many plugins querying things like sysfs at the
|
|
||||||
same time, which can have a measurable effect on the system.
|
|
||||||
* **flush_interval**: Default data flushing interval for all outputs.
|
|
||||||
You should not set this below
|
|
||||||
interval. Maximum flush_interval will be flush_interval + flush_jitter
|
|
||||||
* **flush_jitter**: Jitter the flush interval by a random amount.
|
|
||||||
This is primarily to avoid
|
|
||||||
large write spikes for users running a large number of telegraf instances.
|
|
||||||
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
|
|
||||||
* **debug**: Run telegraf in debug mode.
|
|
||||||
* **quiet**: Run telegraf in quiet mode.
|
|
||||||
* **hostname**: Override default hostname, if empty use os.Hostname().
|
|
||||||
|
|
||||||
#### Measurement Filtering
|
|
||||||
|
|
||||||
Filters can be configured per input or output, see below for examples.
|
|
||||||
|
|
||||||
* **namepass**: An array of strings that is used to filter metrics generated by the
|
|
||||||
current input. Each string in the array is tested as a glob match against
|
|
||||||
measurement names and if it matches, the field is emitted.
|
|
||||||
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
|
|
||||||
* **fieldpass**: An array of strings that is used to filter metrics generated by the
|
|
||||||
current input. Each string in the array is tested as a glob match against field names
|
|
||||||
and if it matches, the field is emitted. fieldpass is not available for outputs.
|
|
||||||
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
|
|
||||||
fielddrop is not available for outputs.
|
|
||||||
* **tagpass**: tag names and arrays of strings that are used to filter
|
|
||||||
measurements by the current input. Each string in the array is tested as a glob
|
|
||||||
match against the tag name, and if it matches the measurement is emitted.
|
|
||||||
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
|
|
||||||
emitted. This is tested on measurements that have passed the tagpass test.
|
|
||||||
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
|
|
||||||
As opposed to tagdrop, which will drop an entire measurement based on it's
|
|
||||||
tags, tagexclude simply strips the given tag keys from the measurement. This
|
|
||||||
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
|
|
||||||
as it is more efficient to filter out tags at the ingestion point.
|
|
||||||
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
|
|
||||||
the tag keys in the final measurement.
|
|
||||||
|
|
||||||
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
|
|
||||||
the plugin definition, otherwise subsequent plugin config options will be
|
|
||||||
interpreted as part of the tagpass/tagdrop map.
|
|
||||||
|
|
||||||
## Input Configuration
|
|
||||||
|
|
||||||
Some configuration options are configurable per input:
|
|
||||||
|
|
||||||
* **name_override**: Override the base name of the measurement.
|
|
||||||
(Default is the name of the input).
|
|
||||||
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
|
||||||
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
|
||||||
* **tags**: A map of tags to apply to a specific input's measurements.
|
|
||||||
* **interval**: How often to gather this metric. Normal plugins use a single
|
|
||||||
global interval, but if one particular input should be run less or more often,
|
|
||||||
you can configure that here.
|
|
||||||
|
|
||||||
#### Input Configuration Examples
|
|
||||||
|
|
||||||
This is a full working config that will output CPU data to an InfluxDB instance
|
|
||||||
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
|
|
||||||
measurements at a 10s interval and will collect per-cpu data, dropping any
|
|
||||||
fields which begin with `time_`.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[global_tags]
|
|
||||||
dc = "denver-1"
|
|
||||||
|
|
||||||
[agent]
|
|
||||||
interval = "10s"
|
|
||||||
|
|
||||||
# OUTPUTS
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
url = "http://192.168.59.103:8086" # required.
|
|
||||||
database = "telegraf" # required.
|
|
||||||
precision = "s"
|
|
||||||
|
|
||||||
# INPUTS
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = true
|
|
||||||
totalcpu = false
|
|
||||||
# filter all fields beginning with 'time_'
|
|
||||||
fielddrop = ["time_*"]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input Config: tagpass and tagdrop
|
|
||||||
|
|
||||||
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
|
|
||||||
the plugin definition, otherwise subsequent plugin config options will be
|
|
||||||
interpreted as part of the tagpass/tagdrop map.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = true
|
|
||||||
totalcpu = false
|
|
||||||
fielddrop = ["cpu_time"]
|
|
||||||
# Don't collect CPU data for cpu6 & cpu7
|
|
||||||
[inputs.cpu.tagdrop]
|
|
||||||
cpu = [ "cpu6", "cpu7" ]
|
|
||||||
|
|
||||||
[[inputs.disk]]
|
|
||||||
[inputs.disk.tagpass]
|
|
||||||
# tagpass conditions are OR, not AND.
|
|
||||||
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
|
|
||||||
# then the metric passes
|
|
||||||
fstype = [ "ext4", "xfs" ]
|
|
||||||
# Globs can also be used on the tag values
|
|
||||||
path = [ "/opt", "/home*" ]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input Config: fieldpass and fielddrop
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# Drop all metrics for guest & steal CPU usage
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
fielddrop = ["usage_guest", "usage_steal"]
|
|
||||||
|
|
||||||
# Only store inode related metrics for disks
|
|
||||||
[[inputs.disk]]
|
|
||||||
fieldpass = ["inodes*"]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input Config: namepass and namedrop
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# Drop all metrics about containers for kubelet
|
|
||||||
[[inputs.prometheus]]
|
|
||||||
urls = ["http://kube-node-1:4194/metrics"]
|
|
||||||
namedrop = ["container_*"]
|
|
||||||
|
|
||||||
# Only store rest client related metrics for kubelet
|
|
||||||
[[inputs.prometheus]]
|
|
||||||
urls = ["http://kube-node-1:4194/metrics"]
|
|
||||||
namepass = ["rest_client_*"]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input Config: taginclude and tagexclude
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# Only include the "cpu" tag in the measurements for the cpu plugin.
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = true
|
|
||||||
totalcpu = true
|
|
||||||
taginclude = ["cpu"]
|
|
||||||
|
|
||||||
# Exclude the "fstype" tag from the measurements for the disk plugin.
|
|
||||||
[[inputs.disk]]
|
|
||||||
tagexclude = ["fstype"]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input config: prefix, suffix, and override
|
|
||||||
|
|
||||||
This plugin will emit measurements with the name `cpu_total`
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
name_suffix = "_total"
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
```
|
|
||||||
|
|
||||||
This will emit measurements with the name `foobar`
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
name_override = "foobar"
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Input config: tags
|
|
||||||
|
|
||||||
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
|
||||||
`tag2=bar`
|
|
||||||
|
|
||||||
NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
|
|
||||||
plugin definition.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
[inputs.cpu.tags]
|
|
||||||
tag1 = "foo"
|
|
||||||
tag2 = "bar"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Multiple inputs of the same type
|
|
||||||
|
|
||||||
Additional inputs (or outputs) of the same type can be specified,
|
|
||||||
just define more instances in the config file. It is highly recommended that
|
|
||||||
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
|
|
||||||
to avoid measurement collisions:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = true
|
|
||||||
totalcpu = false
|
|
||||||
name_override = "percpu_usage"
|
|
||||||
fielddrop = ["cpu_time*"]
|
|
||||||
```
|
|
||||||
|
|
||||||
## Output Configuration
|
|
||||||
|
|
||||||
Telegraf also supports specifying multiple output sinks to send data to,
|
|
||||||
configuring each output sink is different, but examples can be
|
|
||||||
found by running `telegraf -sample-config`.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = [ "http://localhost:8086" ]
|
|
||||||
database = "telegraf"
|
|
||||||
precision = "s"
|
|
||||||
# Drop all measurements that start with "aerospike"
|
|
||||||
namedrop = ["aerospike*"]
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = [ "http://localhost:8086" ]
|
|
||||||
database = "telegraf-aerospike-data"
|
|
||||||
precision = "s"
|
|
||||||
# Only accept aerospike data:
|
|
||||||
namepass = ["aerospike*"]
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = [ "http://localhost:8086" ]
|
|
||||||
database = "telegraf-cpu0-data"
|
|
||||||
precision = "s"
|
|
||||||
# Only store measurements where the tag "cpu" matches the value "cpu0"
|
|
||||||
[outputs.influxdb.tagpass]
|
|
||||||
cpu = ["cpu0"]
|
|
||||||
```
|
|
||||||
@@ -1,374 +0,0 @@
|
|||||||
# Telegraf Input Data Formats
|
|
||||||
|
|
||||||
Telegraf is able to parse the following input data formats into metrics:
|
|
||||||
|
|
||||||
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
|
|
||||||
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
|
|
||||||
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
|
|
||||||
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
|
|
||||||
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
|
|
||||||
|
|
||||||
Telegraf metrics, like InfluxDB
|
|
||||||
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
|
||||||
are a combination of four basic parts:
|
|
||||||
|
|
||||||
1. Measurement Name
|
|
||||||
1. Tags
|
|
||||||
1. Fields
|
|
||||||
1. Timestamp
|
|
||||||
|
|
||||||
These four parts are easily defined when using InfluxDB line-protocol as a
|
|
||||||
data format. But there are other data formats that users may want to use which
|
|
||||||
require more advanced configuration to create usable Telegraf metrics.
|
|
||||||
|
|
||||||
Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
|
|
||||||
these plugins were statically configured to parse just a single
|
|
||||||
data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only
|
|
||||||
supported data in InfluxDB line-protocol.
|
|
||||||
|
|
||||||
But now we are normalizing the parsing of various data formats across all
|
|
||||||
plugins that can support it. You will be able to identify a plugin that supports
|
|
||||||
different data formats by the presence of a `data_format` config option, for
|
|
||||||
example, in the exec plugin:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "json"
|
|
||||||
|
|
||||||
## Additional configuration options go here
|
|
||||||
```
|
|
||||||
|
|
||||||
Each data_format has an additional set of configuration options available, which
|
|
||||||
I'll go over below.
|
|
||||||
|
|
||||||
# Influx:
|
|
||||||
|
|
||||||
There are no additional configuration options for InfluxDB line-protocol. The
|
|
||||||
metrics are parsed directly into Telegraf metrics.
|
|
||||||
|
|
||||||
#### Influx Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
# JSON:
|
|
||||||
|
|
||||||
The JSON data format flattens JSON into metric _fields_.
|
|
||||||
NOTE: Only numerical values are converted to fields, and they are converted
|
|
||||||
into a float. strings are ignored unless specified as a tag_key (see below).
|
|
||||||
|
|
||||||
So for example, this JSON:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"a": 5,
|
|
||||||
"b": {
|
|
||||||
"c": 6
|
|
||||||
},
|
|
||||||
"ignored": "I'm a string"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Would get translated into _fields_ of a measurement:
|
|
||||||
|
|
||||||
```
|
|
||||||
myjsonmetric a=5,b_c=6
|
|
||||||
```
|
|
||||||
|
|
||||||
The _measurement_ _name_ is usually the name of the plugin,
|
|
||||||
but can be overridden using the `name_override` config option.
|
|
||||||
|
|
||||||
#### JSON Configuration:
|
|
||||||
|
|
||||||
The JSON data format supports specifying "tag keys". If specified, keys
|
|
||||||
will be searched for in the root-level of the JSON blob. If the key(s) exist,
|
|
||||||
they will be applied as tags to the Telegraf metrics.
|
|
||||||
|
|
||||||
For example, if you had this configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "json"
|
|
||||||
|
|
||||||
## List of tag names to extract from top-level of JSON server response
|
|
||||||
tag_keys = [
|
|
||||||
"my_tag_1",
|
|
||||||
"my_tag_2"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
with this JSON output from a command:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"a": 5,
|
|
||||||
"b": {
|
|
||||||
"c": 6
|
|
||||||
},
|
|
||||||
"my_tag_1": "foo"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Your Telegraf metrics would get tagged with "my_tag_1"
|
|
||||||
|
|
||||||
```
|
|
||||||
exec_mycollector,my_tag_1=foo a=5,b_c=6
|
|
||||||
```
|
|
||||||
|
|
||||||
# Value:
|
|
||||||
|
|
||||||
The "value" data format translates single values into Telegraf metrics. This
|
|
||||||
is done by assigning a measurement name and setting a single field ("value")
|
|
||||||
as the parsed metric.
|
|
||||||
|
|
||||||
#### Value Configuration:
|
|
||||||
|
|
||||||
You **must** tell Telegraf what type of metric to collect by using the
|
|
||||||
`data_type` configuration option. Available options are:
|
|
||||||
|
|
||||||
1. integer
|
|
||||||
2. float or long
|
|
||||||
3. string
|
|
||||||
4. boolean
|
|
||||||
|
|
||||||
**Note:** It is also recommended that you set `name_override` to a measurement
|
|
||||||
name that makes sense for your metric, otherwise it will just be set to the
|
|
||||||
name of the plugin.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["cat /proc/sys/kernel/random/entropy_avail"]
|
|
||||||
|
|
||||||
## override the default metric name of "exec"
|
|
||||||
name_override = "entropy_available"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "value"
|
|
||||||
data_type = "integer" # required
|
|
||||||
```
|
|
||||||
|
|
||||||
# Graphite:
|
|
||||||
|
|
||||||
The Graphite data format translates graphite _dot_ buckets directly into
|
|
||||||
telegraf measurement names, with a single value field, and without any tags.
|
|
||||||
By default, the separator is left as ".", but this can be changed using the
|
|
||||||
"separator" argument. For more advanced options,
|
|
||||||
Telegraf supports specifying "templates" to translate
|
|
||||||
graphite buckets into Telegraf metrics.
|
|
||||||
|
|
||||||
Templates are of the form:
|
|
||||||
|
|
||||||
```
|
|
||||||
"host.mytag.mytag.measurement.measurement.field*"
|
|
||||||
```
|
|
||||||
|
|
||||||
Where the following keywords exist:
|
|
||||||
|
|
||||||
1. `measurement`: specifies that this section of the graphite bucket corresponds
|
|
||||||
to the measurement name. This can be specified multiple times.
|
|
||||||
2. `field`: specifies that this section of the graphite bucket corresponds
|
|
||||||
to the field name. This can be specified multiple times.
|
|
||||||
3. `measurement*`: specifies that all remaining elements of the graphite bucket
|
|
||||||
correspond to the measurement name.
|
|
||||||
4. `field*`: specifies that all remaining elements of the graphite bucket
|
|
||||||
correspond to the field name.
|
|
||||||
|
|
||||||
Any part of the template that is not a keyword is treated as a tag key. This
|
|
||||||
can also be specified multiple times.
|
|
||||||
|
|
||||||
NOTE: `field*` cannot be used in conjunction with `measurement*`!
|
|
||||||
|
|
||||||
#### Measurement & Tag Templates:
|
|
||||||
|
|
||||||
The most basic template is to specify a single transformation to apply to all
|
|
||||||
incoming metrics. So the following template:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"region.region.measurement*"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
us.west.cpu.load 100
|
|
||||||
=> cpu.load,region=us.west value=100
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Field Templates:
|
|
||||||
|
|
||||||
The field keyword tells Telegraf to give the metric that field name.
|
|
||||||
So the following template:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
separator = "_"
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.field.field.region"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.idle.percent.eu-east 100
|
|
||||||
=> cpu_usage,region=eu-east idle_percent=100
|
|
||||||
```
|
|
||||||
|
|
||||||
The field key can also be derived from all remaining elements of the graphite
|
|
||||||
bucket by specifying `field*`:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
separator = "_"
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.region.field*"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
which would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.eu-east.idle.percentage 100
|
|
||||||
=> cpu_usage,region=eu-east idle_percentage=100
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Filter Templates:
|
|
||||||
|
|
||||||
Users can also filter the template(s) to use based on the name of the bucket,
|
|
||||||
using glob matching, like so:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"cpu.* measurement.measurement.region",
|
|
||||||
"mem.* measurement.measurement.host"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
which would result in the following transformation:
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.load.eu-east 100
|
|
||||||
=> cpu_load,region=eu-east value=100
|
|
||||||
|
|
||||||
mem.cached.localhost 256
|
|
||||||
=> mem_cached,host=localhost value=256
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Adding Tags:
|
|
||||||
|
|
||||||
Additional tags can be added to a metric that don't exist on the received metric.
|
|
||||||
You can add additional tags by specifying them after the pattern.
|
|
||||||
Tags have the same format as the line protocol.
|
|
||||||
Multiple tags are separated by commas.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.field.region datacenter=1a"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.idle.eu-east 100
|
|
||||||
=> cpu_usage,region=eu-east,datacenter=1a idle=100
|
|
||||||
```
|
|
||||||
|
|
||||||
There are many more options available,
|
|
||||||
[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates)
|
|
||||||
|
|
||||||
#### Graphite Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "graphite"
|
|
||||||
|
|
||||||
## This string will be used to join the matched values.
|
|
||||||
separator = "_"
|
|
||||||
|
|
||||||
## Each template line requires a template pattern. It can have an optional
|
|
||||||
## filter before the template and separated by spaces. It can also have optional extra
|
|
||||||
## tags following the template. Multiple tags should be separated by commas and no spaces
|
|
||||||
## similar to the line protocol format. There can be only one default template.
|
|
||||||
## Templates support below format:
|
|
||||||
## 1. filter + template
|
|
||||||
## 2. filter + template + extra tag(s)
|
|
||||||
## 3. filter + template with field key
|
|
||||||
## 4. default template
|
|
||||||
templates = [
|
|
||||||
"*.app env.service.resource.measurement",
|
|
||||||
"stats.* .host.measurement* region=eu-east,agent=sensu",
|
|
||||||
"stats2.* .host.measurement.field",
|
|
||||||
"measurement*"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
# Nagios:
|
|
||||||
|
|
||||||
There are no additional configuration options for Nagios line-protocol. The
|
|
||||||
metrics are parsed directly into Telegraf metrics.
|
|
||||||
|
|
||||||
Note: Nagios Input Data Formats is only supported in `exec` input plugin.
|
|
||||||
|
|
||||||
#### Nagios Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[inputs.exec]]
|
|
||||||
## Commands array
|
|
||||||
commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
|
|
||||||
|
|
||||||
## measurement name suffix (for separating different commands)
|
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has it's own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "nagios"
|
|
||||||
```
|
|
||||||
@@ -1,150 +0,0 @@
|
|||||||
# Telegraf Output Data Formats
|
|
||||||
|
|
||||||
Telegraf is able to serialize metrics into the following output data formats:
|
|
||||||
|
|
||||||
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
|
|
||||||
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
|
|
||||||
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
|
|
||||||
|
|
||||||
Telegraf metrics, like InfluxDB
|
|
||||||
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
|
||||||
are a combination of four basic parts:
|
|
||||||
|
|
||||||
1. Measurement Name
|
|
||||||
1. Tags
|
|
||||||
1. Fields
|
|
||||||
1. Timestamp
|
|
||||||
|
|
||||||
In InfluxDB line protocol, these 4 parts are easily defined in textual form:
|
|
||||||
|
|
||||||
```
|
|
||||||
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
|
|
||||||
```
|
|
||||||
|
|
||||||
For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
|
|
||||||
InfluxDB line protocol was originally the only available output format. But now
|
|
||||||
we are normalizing telegraf metric "serializers" into a
|
|
||||||
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
|
|
||||||
across all output plugins that can support it.
|
|
||||||
You will be able to identify a plugin that supports different data formats
|
|
||||||
by the presence of a `data_format`
|
|
||||||
config option, for example, in the `file` output plugin:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[outputs.file]]
|
|
||||||
## Files to write to, "stdout" is a specially handled file.
|
|
||||||
files = ["stdout"]
|
|
||||||
|
|
||||||
## Data format to output.
|
|
||||||
  ## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
|
|
||||||
## Additional configuration options go here
|
|
||||||
```
|
|
||||||
|
|
||||||
Each data_format has an additional set of configuration options available, which
|
|
||||||
I'll go over below.
|
|
||||||
|
|
||||||
# Influx:
|
|
||||||
|
|
||||||
There are no additional configuration options for InfluxDB line-protocol. The
|
|
||||||
metrics are serialized directly into InfluxDB line-protocol.
|
|
||||||
|
|
||||||
### Influx Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[outputs.file]]
|
|
||||||
## Files to write to, "stdout" is a specially handled file.
|
|
||||||
files = ["stdout", "/tmp/metrics.out"]
|
|
||||||
|
|
||||||
## Data format to output.
|
|
||||||
  ## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
# Graphite:
|
|
||||||
|
|
||||||
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
|
|
||||||
template can be specified for the output of Telegraf metrics into Graphite
|
|
||||||
buckets. The default template is:
|
|
||||||
|
|
||||||
```
|
|
||||||
template = "host.tags.measurement.field"
|
|
||||||
```
|
|
||||||
|
|
||||||
In the above template, we have four parts:
|
|
||||||
|
|
||||||
1. _host_ is a tag key. This can be any tag key that is in the Telegraf
|
|
||||||
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
|
|
||||||
tag value will be filled in.
|
|
||||||
1. _tags_ is a special keyword that outputs all remaining tag values, separated
|
|
||||||
by dots and in alphabetical order (by tag key). These will be filled after all
|
|
||||||
tag keys are filled.
|
|
||||||
1. _measurement_ is a special keyword that outputs the measurement name.
|
|
||||||
1. _field_ is a special keyword that outputs the field name.
|
|
||||||
|
|
||||||
Which means the following influx metric -> graphite conversion would happen:
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
|
|
||||||
=>
|
|
||||||
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
|
|
||||||
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
|
|
||||||
```
|
|
||||||
|
|
||||||
### Graphite Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[outputs.file]]
|
|
||||||
## Files to write to, "stdout" is a specially handled file.
|
|
||||||
files = ["stdout", "/tmp/metrics.out"]
|
|
||||||
|
|
||||||
## Data format to output.
|
|
||||||
  ## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "graphite"
|
|
||||||
|
|
||||||
# prefix each graphite bucket
|
|
||||||
prefix = "telegraf"
|
|
||||||
# graphite template
|
|
||||||
template = "host.tags.measurement.field"
|
|
||||||
```
|
|
||||||
|
|
||||||
# JSON:
|
|
||||||
|
|
||||||
The JSON data format serializes Telegraf metrics into JSON. The format is:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"fields":{
|
|
||||||
"field_1":30,
|
|
||||||
"field_2":4,
|
|
||||||
"field_N":59,
|
|
||||||
"n_images":660
|
|
||||||
},
|
|
||||||
"name":"docker",
|
|
||||||
"tags":{
|
|
||||||
"host":"raynor"
|
|
||||||
},
|
|
||||||
"timestamp":1458229140
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### JSON Configuration:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[outputs.file]]
|
|
||||||
## Files to write to, "stdout" is a specially handled file.
|
|
||||||
files = ["stdout", "/tmp/metrics.out"]
|
|
||||||
|
|
||||||
## Data format to output.
|
|
||||||
  ## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "json"
|
|
||||||
```
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
# Running Telegraf as a Windows Service
|
|
||||||
|
|
||||||
Telegraf natively supports running as a Windows Service. Outlined below are
|
|
||||||
the general steps to set it up.
|
|
||||||
|
|
||||||
1. Obtain the telegraf windows distribution
|
|
||||||
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
|
|
||||||
location simply specify the `-config` parameter with the desired location)
|
|
||||||
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
|
|
||||||
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
|
|
||||||
|
|
||||||
```
|
|
||||||
> C:\"Program Files"\Telegraf\telegraf.exe --service install
|
|
||||||
```
|
|
||||||
|
|
||||||
5. Edit the configuration file to meet your needs
|
|
||||||
6. To check that it works, run:
|
|
||||||
|
|
||||||
```
|
|
||||||
> C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
|
|
||||||
```
|
|
||||||
|
|
||||||
7. To start collecting data, run:
|
|
||||||
|
|
||||||
```
|
|
||||||
> net start telegraf
|
|
||||||
```
|
|
||||||
|
|
||||||
## Other supported operations
|
|
||||||
|
|
||||||
Telegraf can manage its own service through the --service flag:
|
|
||||||
|
|
||||||
| Command | Effect |
|
|
||||||
|------------------------------------|-------------------------------|
|
|
||||||
| `telegraf.exe --service install` | Install telegraf as a service |
|
|
||||||
| `telegraf.exe --service uninstall` | Remove the telegraf service |
|
|
||||||
| `telegraf.exe --service start` | Start the telegraf service |
|
|
||||||
| `telegraf.exe --service stop` | Stop the telegraf service |
|
|
||||||
|
|
||||||
1935
etc/telegraf.conf
1935
etc/telegraf.conf
File diff suppressed because it is too large
Load Diff
@@ -1,202 +0,0 @@
|
|||||||
# Telegraf configuration
|
|
||||||
|
|
||||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
|
||||||
# declared inputs, and sent to the declared outputs.
|
|
||||||
|
|
||||||
# Plugins must be declared in here to be active.
|
|
||||||
# To deactivate a plugin, comment out the name and any variables.
|
|
||||||
|
|
||||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
|
||||||
# file would generate.
|
|
||||||
|
|
||||||
# Global tags can be specified here in key="value" format.
|
|
||||||
[global_tags]
|
|
||||||
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
|
||||||
# rack = "1a"
|
|
||||||
|
|
||||||
# Configuration for telegraf agent
|
|
||||||
[agent]
|
|
||||||
## Default data collection interval for all inputs
|
|
||||||
interval = "10s"
|
|
||||||
## Rounds collection interval to 'interval'
|
|
||||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
|
||||||
round_interval = true
|
|
||||||
|
|
||||||
## Telegraf will cache metric_buffer_limit metrics for each output, and will
|
|
||||||
## flush this buffer on a successful write.
|
|
||||||
metric_buffer_limit = 1000
|
|
||||||
## Flush the buffer whenever full, regardless of flush_interval.
|
|
||||||
flush_buffer_when_full = true
|
|
||||||
|
|
||||||
## Collection jitter is used to jitter the collection by a random amount.
|
|
||||||
## Each plugin will sleep for a random time within jitter before collecting.
|
|
||||||
## This can be used to avoid many plugins querying things like sysfs at the
|
|
||||||
## same time, which can have a measurable effect on the system.
|
|
||||||
collection_jitter = "0s"
|
|
||||||
|
|
||||||
## Default flushing interval for all outputs. You shouldn't set this below
|
|
||||||
## interval. Maximum flush_interval will be flush_interval + flush_jitter
|
|
||||||
flush_interval = "10s"
|
|
||||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
|
||||||
## large write spikes for users running a large number of telegraf instances.
|
|
||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
|
||||||
flush_jitter = "0s"
|
|
||||||
|
|
||||||
## Logging configuration:
|
|
||||||
## Run telegraf in debug mode
|
|
||||||
debug = false
|
|
||||||
## Run telegraf in quiet mode
|
|
||||||
quiet = false
|
|
||||||
## Specify the log file name. The empty string means to log to stdout.
|
|
||||||
logfile = "/Program Files/Telegraf/telegraf.log"
|
|
||||||
|
|
||||||
## Override default hostname, if empty use os.Hostname()
|
|
||||||
hostname = ""
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# OUTPUTS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
# Configuration for influxdb server to send metrics to
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
|
||||||
# Multiple urls can be specified but it is assumed that they are part of the same
|
|
||||||
# cluster, this means that only ONE of the urls will be written to each interval.
|
|
||||||
# urls = ["udp://localhost:8089"] # UDP endpoint example
|
|
||||||
urls = ["http://localhost:8086"] # required
|
|
||||||
# The target database for metrics (telegraf will create it if not exists)
|
|
||||||
database = "telegraf" # required
|
|
||||||
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
|
||||||
# note: using second precision greatly helps InfluxDB compression
|
|
||||||
precision = "s"
|
|
||||||
|
|
||||||
## Write timeout (for the InfluxDB client), formatted as a string.
|
|
||||||
## If not provided, will default to 5s. 0s means no timeout (not recommended).
|
|
||||||
timeout = "5s"
|
|
||||||
# username = "telegraf"
|
|
||||||
# password = "metricsmetricsmetricsmetrics"
|
|
||||||
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
|
||||||
# user_agent = "telegraf"
|
|
||||||
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
|
||||||
# udp_payload = 512
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# INPUTS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
# Windows Performance Counters plugin.
|
|
||||||
# This is the recommended method of monitoring system metrics on Windows,
|
|
||||||
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
|
|
||||||
# which utilize more system resources.
|
|
||||||
#
|
|
||||||
# See more configuration examples at:
|
|
||||||
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
|
|
||||||
|
|
||||||
[[inputs.win_perf_counters]]
|
|
||||||
[[inputs.win_perf_counters.object]]
|
|
||||||
# Processor usage, alternative to native, reports on a per core.
|
|
||||||
ObjectName = "Processor"
|
|
||||||
Instances = ["*"]
|
|
||||||
Counters = [
|
|
||||||
"% Idle Time",
|
|
||||||
"% Interrupt Time",
|
|
||||||
"% Privileged Time",
|
|
||||||
"% User Time",
|
|
||||||
"% Processor Time",
|
|
||||||
]
|
|
||||||
Measurement = "win_cpu"
|
|
||||||
# Set to true to include _Total instance when querying for all (*).
|
|
||||||
#IncludeTotal=false
|
|
||||||
|
|
||||||
[[inputs.win_perf_counters.object]]
|
|
||||||
# Disk times and queues
|
|
||||||
ObjectName = "LogicalDisk"
|
|
||||||
Instances = ["*"]
|
|
||||||
Counters = [
|
|
||||||
"% Idle Time",
|
|
||||||
"% Disk Time","% Disk Read Time",
|
|
||||||
"% Disk Write Time",
|
|
||||||
"% User Time",
|
|
||||||
"Current Disk Queue Length",
|
|
||||||
]
|
|
||||||
Measurement = "win_disk"
|
|
||||||
# Set to true to include _Total instance when querying for all (*).
|
|
||||||
#IncludeTotal=false
|
|
||||||
|
|
||||||
[[inputs.win_perf_counters.object]]
|
|
||||||
ObjectName = "System"
|
|
||||||
Counters = [
|
|
||||||
"Context Switches/sec",
|
|
||||||
"System Calls/sec",
|
|
||||||
]
|
|
||||||
Instances = ["------"]
|
|
||||||
Measurement = "win_system"
|
|
||||||
# Set to true to include _Total instance when querying for all (*).
|
|
||||||
#IncludeTotal=false
|
|
||||||
|
|
||||||
[[inputs.win_perf_counters.object]]
|
|
||||||
# Example query where the Instance portion must be removed to get data back,
|
|
||||||
# such as from the Memory object.
|
|
||||||
ObjectName = "Memory"
|
|
||||||
Counters = [
|
|
||||||
"Available Bytes",
|
|
||||||
"Cache Faults/sec",
|
|
||||||
"Demand Zero Faults/sec",
|
|
||||||
"Page Faults/sec",
|
|
||||||
"Pages/sec",
|
|
||||||
"Transition Faults/sec",
|
|
||||||
"Pool Nonpaged Bytes",
|
|
||||||
"Pool Paged Bytes",
|
|
||||||
]
|
|
||||||
# Use 6 x - to remove the Instance bit from the query.
|
|
||||||
Instances = ["------"]
|
|
||||||
Measurement = "win_mem"
|
|
||||||
# Set to true to include _Total instance when querying for all (*).
|
|
||||||
#IncludeTotal=false
|
|
||||||
|
|
||||||
|
|
||||||
# Windows system plugins using WMI (disabled by default, using
|
|
||||||
# win_perf_counters over WMI is recommended)
|
|
||||||
|
|
||||||
# # Read metrics about cpu usage
|
|
||||||
# [[inputs.cpu]]
|
|
||||||
# ## Whether to report per-cpu stats or not
|
|
||||||
# percpu = true
|
|
||||||
# ## Whether to report total system cpu stats or not
|
|
||||||
# totalcpu = true
|
|
||||||
# ## Comment this line if you want the raw CPU time metrics
|
|
||||||
# fielddrop = ["time_*"]
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about disk usage by mount point
|
|
||||||
# [[inputs.disk]]
|
|
||||||
# ## By default, telegraf gather stats for all mountpoints.
|
|
||||||
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
|
|
||||||
# ## mount_points=["/"]
|
|
||||||
#
|
|
||||||
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
|
||||||
# ## present on /run, /var/run, /dev/shm or /dev).
|
|
||||||
# # ignore_fs = ["tmpfs", "devtmpfs"]
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about disk IO by device
|
|
||||||
# [[inputs.diskio]]
|
|
||||||
# ## By default, telegraf will gather stats for all devices including
|
|
||||||
# ## disk partitions.
|
|
||||||
# ## Setting devices will restrict the stats to the specified devices.
|
|
||||||
# ## devices = ["sda", "sdb"]
|
|
||||||
# ## Uncomment the following line if you do not need disk serial numbers.
|
|
||||||
# ## skip_serial_number = true
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about memory usage
|
|
||||||
# [[inputs.mem]]
|
|
||||||
# # no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about swap memory usage
|
|
||||||
# [[inputs.swap]]
|
|
||||||
# # no configuration
|
|
||||||
|
|
||||||
@@ -1,79 +0,0 @@
|
|||||||
package filter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Filter interface {
|
|
||||||
Match(string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile takes a list of string filters and returns a Filter interface
|
|
||||||
// for matching a given string against the filter list. The filter list
|
|
||||||
// supports glob matching too, ie:
|
|
||||||
//
|
|
||||||
// f, _ := Compile([]string{"cpu", "mem", "net*"})
|
|
||||||
// f.Match("cpu") // true
|
|
||||||
// f.Match("network") // true
|
|
||||||
// f.Match("memory") // false
|
|
||||||
//
|
|
||||||
func Compile(filters []string) (Filter, error) {
|
|
||||||
// return if there is nothing to compile
|
|
||||||
if len(filters) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if we can compile a non-glob filter
|
|
||||||
noGlob := true
|
|
||||||
for _, filter := range filters {
|
|
||||||
if hasMeta(filter) {
|
|
||||||
noGlob = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case noGlob:
|
|
||||||
// return non-globbing filter if not needed.
|
|
||||||
return compileFilterNoGlob(filters), nil
|
|
||||||
case len(filters) == 1:
|
|
||||||
return glob.Compile(filters[0])
|
|
||||||
default:
|
|
||||||
return glob.Compile("{" + strings.Join(filters, ",") + "}")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasMeta reports whether path contains any magic glob characters.
|
|
||||||
func hasMeta(s string) bool {
|
|
||||||
return strings.IndexAny(s, "*?[") >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type filter struct {
|
|
||||||
m map[string]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *filter) Match(s string) bool {
|
|
||||||
_, ok := f.m[s]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
type filtersingle struct {
|
|
||||||
s string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *filtersingle) Match(s string) bool {
|
|
||||||
return f.s == s
|
|
||||||
}
|
|
||||||
|
|
||||||
func compileFilterNoGlob(filters []string) Filter {
|
|
||||||
if len(filters) == 1 {
|
|
||||||
return &filtersingle{s: filters[0]}
|
|
||||||
}
|
|
||||||
out := filter{m: make(map[string]struct{})}
|
|
||||||
for _, filter := range filters {
|
|
||||||
out.m[filter] = struct{}{}
|
|
||||||
}
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
package filter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCompile(t *testing.T) {
|
|
||||||
f, err := Compile([]string{})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Nil(t, f)
|
|
||||||
|
|
||||||
f, err = Compile([]string{"cpu"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.False(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = Compile([]string{"cpu*"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.True(t, f.Match("cpu0"))
|
|
||||||
assert.False(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = Compile([]string{"cpu", "mem"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.True(t, f.Match("mem"))
|
|
||||||
|
|
||||||
f, err = Compile([]string{"cpu", "mem", "net*"})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, f.Match("cpu"))
|
|
||||||
assert.False(t, f.Match("cpu0"))
|
|
||||||
assert.True(t, f.Match("mem"))
|
|
||||||
assert.True(t, f.Match("network"))
|
|
||||||
}
|
|
||||||
|
|
||||||
var benchbool bool
|
|
||||||
|
|
||||||
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"cpu"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("network")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"cpu"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("cpu")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFilter(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"cpu", "mem", "net*"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("network")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFilterNoGlob(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"cpu", "mem", "net"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("net")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFilter2(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
|
||||||
"aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("network")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkFilter2NoGlob(b *testing.B) {
|
|
||||||
f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
|
|
||||||
"aw", "az", "axxx", "ab", "cpu", "mem", "net"})
|
|
||||||
var tmp bool
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tmp = f.Match("net")
|
|
||||||
}
|
|
||||||
benchbool = tmp
|
|
||||||
}
|
|
||||||
31
input.go
31
input.go
@@ -1,31 +0,0 @@
|
|||||||
package telegraf
|
|
||||||
|
|
||||||
type Input interface {
|
|
||||||
// SampleConfig returns the default configuration of the Input
|
|
||||||
SampleConfig() string
|
|
||||||
|
|
||||||
// Description returns a one-sentence description on the Input
|
|
||||||
Description() string
|
|
||||||
|
|
||||||
// Gather takes in an accumulator and adds the metrics that the Input
|
|
||||||
// gathers. This is called every "interval"
|
|
||||||
Gather(Accumulator) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type ServiceInput interface {
|
|
||||||
// SampleConfig returns the default configuration of the Input
|
|
||||||
SampleConfig() string
|
|
||||||
|
|
||||||
// Description returns a one-sentence description on the Input
|
|
||||||
Description() string
|
|
||||||
|
|
||||||
// Gather takes in an accumulator and adds the metrics that the Input
|
|
||||||
// gathers. This is called every "interval"
|
|
||||||
Gather(Accumulator) error
|
|
||||||
|
|
||||||
// Start starts the ServiceInput's service, whatever that may be
|
|
||||||
Start(Accumulator) error
|
|
||||||
|
|
||||||
// Stop stops the services and closes any necessary channels and connections
|
|
||||||
Stop()
|
|
||||||
}
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
package buffer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Buffer is an object for storing metrics in a circular buffer.
|
|
||||||
type Buffer struct {
|
|
||||||
buf chan telegraf.Metric
|
|
||||||
// total dropped metrics
|
|
||||||
drops int
|
|
||||||
// total metrics added
|
|
||||||
total int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBuffer returns a Buffer
|
|
||||||
// size is the maximum number of metrics that Buffer will cache. If Add is
|
|
||||||
// called when the buffer is full, then the oldest metric(s) will be dropped.
|
|
||||||
func NewBuffer(size int) *Buffer {
|
|
||||||
return &Buffer{
|
|
||||||
buf: make(chan telegraf.Metric, size),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEmpty returns true if Buffer is empty.
|
|
||||||
func (b *Buffer) IsEmpty() bool {
|
|
||||||
return len(b.buf) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the current length of the buffer.
|
|
||||||
func (b *Buffer) Len() int {
|
|
||||||
return len(b.buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drops returns the total number of dropped metrics that have occured in this
|
|
||||||
// buffer since instantiation.
|
|
||||||
func (b *Buffer) Drops() int {
|
|
||||||
return b.drops
|
|
||||||
}
|
|
||||||
|
|
||||||
// Total returns the total number of metrics that have been added to this buffer.
|
|
||||||
func (b *Buffer) Total() int {
|
|
||||||
return b.total
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds metrics to the buffer.
|
|
||||||
func (b *Buffer) Add(metrics ...telegraf.Metric) {
|
|
||||||
for i, _ := range metrics {
|
|
||||||
b.total++
|
|
||||||
select {
|
|
||||||
case b.buf <- metrics[i]:
|
|
||||||
default:
|
|
||||||
b.drops++
|
|
||||||
<-b.buf
|
|
||||||
b.buf <- metrics[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Batch returns a batch of metrics of size batchSize.
|
|
||||||
// the batch will be of maximum length batchSize. It can be less than batchSize,
|
|
||||||
// if the length of Buffer is less than batchSize.
|
|
||||||
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
|
|
||||||
n := min(len(b.buf), batchSize)
|
|
||||||
out := make([]telegraf.Metric, n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
out[i] = <-b.buf
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func min(a, b int) int {
|
|
||||||
if b < a {
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
@@ -1,94 +0,0 @@
|
|||||||
package buffer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
var metricList = []telegraf.Metric{
|
|
||||||
testutil.TestMetric(2, "mymetric1"),
|
|
||||||
testutil.TestMetric(1, "mymetric2"),
|
|
||||||
testutil.TestMetric(11, "mymetric3"),
|
|
||||||
testutil.TestMetric(15, "mymetric4"),
|
|
||||||
testutil.TestMetric(8, "mymetric5"),
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkAddMetrics(b *testing.B) {
|
|
||||||
buf := NewBuffer(10000)
|
|
||||||
m := testutil.TestMetric(1, "mymetric")
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
buf.Add(m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewBufferBasicFuncs(t *testing.T) {
|
|
||||||
b := NewBuffer(10)
|
|
||||||
|
|
||||||
assert.True(t, b.IsEmpty())
|
|
||||||
assert.Zero(t, b.Len())
|
|
||||||
assert.Zero(t, b.Drops())
|
|
||||||
assert.Zero(t, b.Total())
|
|
||||||
|
|
||||||
m := testutil.TestMetric(1, "mymetric")
|
|
||||||
b.Add(m)
|
|
||||||
assert.False(t, b.IsEmpty())
|
|
||||||
assert.Equal(t, b.Len(), 1)
|
|
||||||
assert.Equal(t, b.Drops(), 0)
|
|
||||||
assert.Equal(t, b.Total(), 1)
|
|
||||||
|
|
||||||
b.Add(metricList...)
|
|
||||||
assert.False(t, b.IsEmpty())
|
|
||||||
assert.Equal(t, b.Len(), 6)
|
|
||||||
assert.Equal(t, b.Drops(), 0)
|
|
||||||
assert.Equal(t, b.Total(), 6)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDroppingMetrics(t *testing.T) {
|
|
||||||
b := NewBuffer(10)
|
|
||||||
|
|
||||||
// Add up to the size of the buffer
|
|
||||||
b.Add(metricList...)
|
|
||||||
b.Add(metricList...)
|
|
||||||
assert.False(t, b.IsEmpty())
|
|
||||||
assert.Equal(t, b.Len(), 10)
|
|
||||||
assert.Equal(t, b.Drops(), 0)
|
|
||||||
assert.Equal(t, b.Total(), 10)
|
|
||||||
|
|
||||||
// Add 5 more and verify they were dropped
|
|
||||||
b.Add(metricList...)
|
|
||||||
assert.False(t, b.IsEmpty())
|
|
||||||
assert.Equal(t, b.Len(), 10)
|
|
||||||
assert.Equal(t, b.Drops(), 5)
|
|
||||||
assert.Equal(t, b.Total(), 15)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGettingBatches(t *testing.T) {
|
|
||||||
b := NewBuffer(20)
|
|
||||||
|
|
||||||
// Verify that the buffer returned is smaller than requested when there are
|
|
||||||
// not as many items as requested.
|
|
||||||
b.Add(metricList...)
|
|
||||||
batch := b.Batch(10)
|
|
||||||
assert.Len(t, batch, 5)
|
|
||||||
|
|
||||||
// Verify that the buffer is now empty
|
|
||||||
assert.True(t, b.IsEmpty())
|
|
||||||
assert.Zero(t, b.Len())
|
|
||||||
assert.Zero(t, b.Drops())
|
|
||||||
assert.Equal(t, b.Total(), 5)
|
|
||||||
|
|
||||||
// Verify that the buffer returned is not more than the size requested
|
|
||||||
b.Add(metricList...)
|
|
||||||
batch = b.Batch(3)
|
|
||||||
assert.Len(t, batch, 3)
|
|
||||||
|
|
||||||
// Verify that buffer is not empty
|
|
||||||
assert.False(t, b.IsEmpty())
|
|
||||||
assert.Equal(t, b.Len(), 2)
|
|
||||||
assert.Equal(t, b.Drops(), 0)
|
|
||||||
assert.Equal(t, b.Total(), 10)
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
package aws
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CredentialConfig struct {
|
|
||||||
Region string
|
|
||||||
AccessKey string
|
|
||||||
SecretKey string
|
|
||||||
RoleARN string
|
|
||||||
Profile string
|
|
||||||
Filename string
|
|
||||||
Token string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CredentialConfig) Credentials() client.ConfigProvider {
|
|
||||||
if c.RoleARN != "" {
|
|
||||||
return c.assumeCredentials()
|
|
||||||
} else {
|
|
||||||
return c.rootCredentials()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
|
|
||||||
config := &aws.Config{
|
|
||||||
Region: aws.String(c.Region),
|
|
||||||
}
|
|
||||||
if c.AccessKey != "" || c.SecretKey != "" {
|
|
||||||
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
|
|
||||||
} else if c.Profile != "" || c.Filename != "" {
|
|
||||||
config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile)
|
|
||||||
}
|
|
||||||
|
|
||||||
return session.New(config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
|
|
||||||
rootCredentials := c.rootCredentials()
|
|
||||||
config := &aws.Config{
|
|
||||||
Region: aws.String(c.Region),
|
|
||||||
}
|
|
||||||
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
|
|
||||||
return session.New(config)
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,99 +1,48 @@
|
|||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/internal/models"
|
"github.com/influxdb/telegraf/plugins"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdb/telegraf/plugins/exec"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/exec"
|
"github.com/influxdb/telegraf/plugins/memcached"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/memcached"
|
"github.com/influxdb/telegraf/plugins/procstat"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/procstat"
|
|
||||||
"github.com/influxdata/telegraf/plugins/parsers"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
|
func TestConfig_LoadSinglePlugin(t *testing.T) {
|
||||||
c := NewConfig()
|
|
||||||
err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
err = os.Setenv("TEST_INTERVAL", "10s")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
c.LoadConfig("./testdata/single_plugin_env_vars.toml")
|
|
||||||
|
|
||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
|
||||||
memcached.Servers = []string{"192.168.1.1"}
|
|
||||||
|
|
||||||
filter := models.Filter{
|
|
||||||
NameDrop: []string{"metricname2"},
|
|
||||||
NamePass: []string{"metricname1"},
|
|
||||||
FieldDrop: []string{"other", "stuff"},
|
|
||||||
FieldPass: []string{"some", "strings"},
|
|
||||||
TagDrop: []models.TagFilter{
|
|
||||||
models.TagFilter{
|
|
||||||
Name: "badtag",
|
|
||||||
Filter: []string{"othertag"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
TagPass: []models.TagFilter{
|
|
||||||
models.TagFilter{
|
|
||||||
Name: "goodtag",
|
|
||||||
Filter: []string{"mytag"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, filter.Compile())
|
|
||||||
mConfig := &models.InputConfig{
|
|
||||||
Name: "memcached",
|
|
||||||
Filter: filter,
|
|
||||||
Interval: 10 * time.Second,
|
|
||||||
}
|
|
||||||
mConfig.Tags = make(map[string]string)
|
|
||||||
|
|
||||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
|
||||||
"Testdata did not produce a correct memcached struct.")
|
|
||||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
|
||||||
"Testdata did not produce correct memcached metadata.")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_LoadSingleInput(t *testing.T) {
|
|
||||||
c := NewConfig()
|
c := NewConfig()
|
||||||
c.LoadConfig("./testdata/single_plugin.toml")
|
c.LoadConfig("./testdata/single_plugin.toml")
|
||||||
|
|
||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
|
||||||
memcached.Servers = []string{"localhost"}
|
memcached.Servers = []string{"localhost"}
|
||||||
|
|
||||||
filter := models.Filter{
|
mConfig := &PluginConfig{
|
||||||
NameDrop: []string{"metricname2"},
|
Name: "memcached",
|
||||||
NamePass: []string{"metricname1"},
|
Filter: Filter{
|
||||||
FieldDrop: []string{"other", "stuff"},
|
Drop: []string{"other", "stuff"},
|
||||||
FieldPass: []string{"some", "strings"},
|
Pass: []string{"some", "strings"},
|
||||||
TagDrop: []models.TagFilter{
|
TagDrop: []TagFilter{
|
||||||
models.TagFilter{
|
TagFilter{
|
||||||
Name: "badtag",
|
Name: "badtag",
|
||||||
Filter: []string{"othertag"},
|
Filter: []string{"othertag"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
TagPass: []TagFilter{
|
||||||
TagPass: []models.TagFilter{
|
TagFilter{
|
||||||
models.TagFilter{
|
Name: "goodtag",
|
||||||
Name: "goodtag",
|
Filter: []string{"mytag"},
|
||||||
Filter: []string{"mytag"},
|
},
|
||||||
},
|
},
|
||||||
|
IsActive: true,
|
||||||
},
|
},
|
||||||
}
|
|
||||||
assert.NoError(t, filter.Compile())
|
|
||||||
mConfig := &models.InputConfig{
|
|
||||||
Name: "memcached",
|
|
||||||
Filter: filter,
|
|
||||||
Interval: 5 * time.Second,
|
Interval: 5 * time.Second,
|
||||||
}
|
}
|
||||||
mConfig.Tags = make(map[string]string)
|
|
||||||
|
|
||||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
assert.Equal(t, memcached, c.Plugins[0].Plugin,
|
||||||
"Testdata did not produce a correct memcached struct.")
|
"Testdata did not produce a correct memcached struct.")
|
||||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
assert.Equal(t, mConfig, c.Plugins[0].Config,
|
||||||
"Testdata did not produce correct memcached metadata.")
|
"Testdata did not produce correct memcached metadata.")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -108,69 +57,240 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
|||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
|
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
|
||||||
memcached.Servers = []string{"localhost"}
|
memcached.Servers = []string{"localhost"}
|
||||||
|
|
||||||
filter := models.Filter{
|
mConfig := &PluginConfig{
|
||||||
NameDrop: []string{"metricname2"},
|
Name: "memcached",
|
||||||
NamePass: []string{"metricname1"},
|
Filter: Filter{
|
||||||
FieldDrop: []string{"other", "stuff"},
|
Drop: []string{"other", "stuff"},
|
||||||
FieldPass: []string{"some", "strings"},
|
Pass: []string{"some", "strings"},
|
||||||
TagDrop: []models.TagFilter{
|
TagDrop: []TagFilter{
|
||||||
models.TagFilter{
|
TagFilter{
|
||||||
Name: "badtag",
|
Name: "badtag",
|
||||||
Filter: []string{"othertag"},
|
Filter: []string{"othertag"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
TagPass: []TagFilter{
|
||||||
TagPass: []models.TagFilter{
|
TagFilter{
|
||||||
models.TagFilter{
|
Name: "goodtag",
|
||||||
Name: "goodtag",
|
Filter: []string{"mytag"},
|
||||||
Filter: []string{"mytag"},
|
},
|
||||||
},
|
},
|
||||||
|
IsActive: true,
|
||||||
},
|
},
|
||||||
}
|
|
||||||
assert.NoError(t, filter.Compile())
|
|
||||||
mConfig := &models.InputConfig{
|
|
||||||
Name: "memcached",
|
|
||||||
Filter: filter,
|
|
||||||
Interval: 5 * time.Second,
|
Interval: 5 * time.Second,
|
||||||
}
|
}
|
||||||
mConfig.Tags = make(map[string]string)
|
assert.Equal(t, memcached, c.Plugins[0].Plugin,
|
||||||
|
|
||||||
assert.Equal(t, memcached, c.Inputs[0].Input,
|
|
||||||
"Testdata did not produce a correct memcached struct.")
|
"Testdata did not produce a correct memcached struct.")
|
||||||
assert.Equal(t, mConfig, c.Inputs[0].Config,
|
assert.Equal(t, mConfig, c.Plugins[0].Config,
|
||||||
"Testdata did not produce correct memcached metadata.")
|
"Testdata did not produce correct memcached metadata.")
|
||||||
|
|
||||||
ex := inputs.Inputs["exec"]().(*exec.Exec)
|
ex := plugins.Plugins["exec"]().(*exec.Exec)
|
||||||
p, err := parsers.NewJSONParser("exec", nil, nil)
|
ex.Commands = []*exec.Command{
|
||||||
assert.NoError(t, err)
|
&exec.Command{
|
||||||
ex.SetParser(p)
|
Command: "/usr/bin/myothercollector --foo=bar",
|
||||||
ex.Command = "/usr/bin/myothercollector --foo=bar"
|
Name: "myothercollector",
|
||||||
eConfig := &models.InputConfig{
|
},
|
||||||
Name: "exec",
|
|
||||||
MeasurementSuffix: "_myothercollector",
|
|
||||||
}
|
}
|
||||||
eConfig.Tags = make(map[string]string)
|
eConfig := &PluginConfig{Name: "exec"}
|
||||||
assert.Equal(t, ex, c.Inputs[1].Input,
|
assert.Equal(t, ex, c.Plugins[1].Plugin,
|
||||||
"Merged Testdata did not produce a correct exec struct.")
|
"Merged Testdata did not produce a correct exec struct.")
|
||||||
assert.Equal(t, eConfig, c.Inputs[1].Config,
|
assert.Equal(t, eConfig, c.Plugins[1].Config,
|
||||||
"Merged Testdata did not produce correct exec metadata.")
|
"Merged Testdata did not produce correct exec metadata.")
|
||||||
|
|
||||||
memcached.Servers = []string{"192.168.1.1"}
|
memcached.Servers = []string{"192.168.1.1"}
|
||||||
assert.Equal(t, memcached, c.Inputs[2].Input,
|
assert.Equal(t, memcached, c.Plugins[2].Plugin,
|
||||||
"Testdata did not produce a correct memcached struct.")
|
"Testdata did not produce a correct memcached struct.")
|
||||||
assert.Equal(t, mConfig, c.Inputs[2].Config,
|
assert.Equal(t, mConfig, c.Plugins[2].Config,
|
||||||
"Testdata did not produce correct memcached metadata.")
|
"Testdata did not produce correct memcached metadata.")
|
||||||
|
|
||||||
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
|
pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
|
||||||
pstat.PidFile = "/var/run/grafana-server.pid"
|
pstat.Specifications = []*procstat.Specification{
|
||||||
|
&procstat.Specification{
|
||||||
|
PidFile: "/var/run/grafana-server.pid",
|
||||||
|
},
|
||||||
|
&procstat.Specification{
|
||||||
|
PidFile: "/var/run/influxdb/influxd.pid",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
pConfig := &models.InputConfig{Name: "procstat"}
|
pConfig := &PluginConfig{Name: "procstat"}
|
||||||
pConfig.Tags = make(map[string]string)
|
|
||||||
|
|
||||||
assert.Equal(t, pstat, c.Inputs[3].Input,
|
assert.Equal(t, pstat, c.Plugins[3].Plugin,
|
||||||
"Merged Testdata did not produce a correct procstat struct.")
|
"Merged Testdata did not produce a correct procstat struct.")
|
||||||
assert.Equal(t, pConfig, c.Inputs[3].Config,
|
assert.Equal(t, pConfig, c.Plugins[3].Config,
|
||||||
"Merged Testdata did not produce correct procstat metadata.")
|
"Merged Testdata did not produce correct procstat metadata.")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFilter_Empty(t *testing.T) {
|
||||||
|
f := Filter{}
|
||||||
|
|
||||||
|
measurements := []string{
|
||||||
|
"foo",
|
||||||
|
"bar",
|
||||||
|
"barfoo",
|
||||||
|
"foo_bar",
|
||||||
|
"foo.bar",
|
||||||
|
"foo-bar",
|
||||||
|
"supercalifradjulisticexpialidocious",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, measurement := range measurements {
|
||||||
|
if !f.ShouldPass(measurement) {
|
||||||
|
t.Errorf("Expected measurement %s to pass", measurement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_Pass(t *testing.T) {
|
||||||
|
f := Filter{
|
||||||
|
Pass: []string{"foo*", "cpu_usage_idle"},
|
||||||
|
}
|
||||||
|
|
||||||
|
passes := []string{
|
||||||
|
"foo",
|
||||||
|
"foo_bar",
|
||||||
|
"foo.bar",
|
||||||
|
"foo-bar",
|
||||||
|
"cpu_usage_idle",
|
||||||
|
}
|
||||||
|
|
||||||
|
drops := []string{
|
||||||
|
"bar",
|
||||||
|
"barfoo",
|
||||||
|
"bar_foo",
|
||||||
|
"cpu_usage_busy",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, measurement := range passes {
|
||||||
|
if !f.ShouldPass(measurement) {
|
||||||
|
t.Errorf("Expected measurement %s to pass", measurement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, measurement := range drops {
|
||||||
|
if f.ShouldPass(measurement) {
|
||||||
|
t.Errorf("Expected measurement %s to drop", measurement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_Drop(t *testing.T) {
|
||||||
|
f := Filter{
|
||||||
|
Drop: []string{"foo*", "cpu_usage_idle"},
|
||||||
|
}
|
||||||
|
|
||||||
|
drops := []string{
|
||||||
|
"foo",
|
||||||
|
"foo_bar",
|
||||||
|
"foo.bar",
|
||||||
|
"foo-bar",
|
||||||
|
"cpu_usage_idle",
|
||||||
|
}
|
||||||
|
|
||||||
|
passes := []string{
|
||||||
|
"bar",
|
||||||
|
"barfoo",
|
||||||
|
"bar_foo",
|
||||||
|
"cpu_usage_busy",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, measurement := range passes {
|
||||||
|
if !f.ShouldPass(measurement) {
|
||||||
|
t.Errorf("Expected measurement %s to pass", measurement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, measurement := range drops {
|
||||||
|
if f.ShouldPass(measurement) {
|
||||||
|
t.Errorf("Expected measurement %s to drop", measurement)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_TagPass(t *testing.T) {
|
||||||
|
filters := []TagFilter{
|
||||||
|
TagFilter{
|
||||||
|
Name: "cpu",
|
||||||
|
Filter: []string{"cpu-*"},
|
||||||
|
},
|
||||||
|
TagFilter{
|
||||||
|
Name: "mem",
|
||||||
|
Filter: []string{"mem_free"},
|
||||||
|
}}
|
||||||
|
f := Filter{
|
||||||
|
TagPass: filters,
|
||||||
|
}
|
||||||
|
|
||||||
|
passes := []map[string]string{
|
||||||
|
{"cpu": "cpu-total"},
|
||||||
|
{"cpu": "cpu-0"},
|
||||||
|
{"cpu": "cpu-1"},
|
||||||
|
{"cpu": "cpu-2"},
|
||||||
|
{"mem": "mem_free"},
|
||||||
|
}
|
||||||
|
|
||||||
|
drops := []map[string]string{
|
||||||
|
{"cpu": "cputotal"},
|
||||||
|
{"cpu": "cpu0"},
|
||||||
|
{"cpu": "cpu1"},
|
||||||
|
{"cpu": "cpu2"},
|
||||||
|
{"mem": "mem_used"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tags := range passes {
|
||||||
|
if !f.ShouldTagsPass(tags) {
|
||||||
|
t.Errorf("Expected tags %v to pass", tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tags := range drops {
|
||||||
|
if f.ShouldTagsPass(tags) {
|
||||||
|
t.Errorf("Expected tags %v to drop", tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilter_TagDrop(t *testing.T) {
|
||||||
|
filters := []TagFilter{
|
||||||
|
TagFilter{
|
||||||
|
Name: "cpu",
|
||||||
|
Filter: []string{"cpu-*"},
|
||||||
|
},
|
||||||
|
TagFilter{
|
||||||
|
Name: "mem",
|
||||||
|
Filter: []string{"mem_free"},
|
||||||
|
}}
|
||||||
|
f := Filter{
|
||||||
|
TagDrop: filters,
|
||||||
|
}
|
||||||
|
|
||||||
|
drops := []map[string]string{
|
||||||
|
{"cpu": "cpu-total"},
|
||||||
|
{"cpu": "cpu-0"},
|
||||||
|
{"cpu": "cpu-1"},
|
||||||
|
{"cpu": "cpu-2"},
|
||||||
|
{"mem": "mem_free"},
|
||||||
|
}
|
||||||
|
|
||||||
|
passes := []map[string]string{
|
||||||
|
{"cpu": "cputotal"},
|
||||||
|
{"cpu": "cpu0"},
|
||||||
|
{"cpu": "cpu1"},
|
||||||
|
{"cpu": "cpu2"},
|
||||||
|
{"mem": "mem_used"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tags := range passes {
|
||||||
|
if !f.ShouldTagsPass(tags) {
|
||||||
|
t.Errorf("Expected tags %v to pass", tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tags := range drops {
|
||||||
|
if f.ShouldTagsPass(tags) {
|
||||||
|
t.Errorf("Expected tags %v to drop", tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
12
internal/config/testdata/single_plugin.toml
vendored
12
internal/config/testdata/single_plugin.toml
vendored
@@ -1,11 +1,9 @@
|
|||||||
[[inputs.memcached]]
|
[[plugins.memcached]]
|
||||||
servers = ["localhost"]
|
servers = ["localhost"]
|
||||||
namepass = ["metricname1"]
|
pass = ["some", "strings"]
|
||||||
namedrop = ["metricname2"]
|
drop = ["other", "stuff"]
|
||||||
fieldpass = ["some", "strings"]
|
|
||||||
fielddrop = ["other", "stuff"]
|
|
||||||
interval = "5s"
|
interval = "5s"
|
||||||
[inputs.memcached.tagpass]
|
[plugins.memcached.tagpass]
|
||||||
goodtag = ["mytag"]
|
goodtag = ["mytag"]
|
||||||
[inputs.memcached.tagdrop]
|
[plugins.memcached.tagdrop]
|
||||||
badtag = ["othertag"]
|
badtag = ["othertag"]
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
[[inputs.memcached]]
|
|
||||||
servers = ["$MY_TEST_SERVER"]
|
|
||||||
namepass = ["metricname1"]
|
|
||||||
namedrop = ["metricname2"]
|
|
||||||
fieldpass = ["some", "strings"]
|
|
||||||
fielddrop = ["other", "stuff"]
|
|
||||||
interval = "$TEST_INTERVAL"
|
|
||||||
[inputs.memcached.tagpass]
|
|
||||||
goodtag = ["mytag"]
|
|
||||||
[inputs.memcached.tagdrop]
|
|
||||||
badtag = ["othertag"]
|
|
||||||
8
internal/config/testdata/subconfig/exec.conf
vendored
8
internal/config/testdata/subconfig/exec.conf
vendored
@@ -1,4 +1,8 @@
|
|||||||
[[inputs.exec]]
|
[[plugins.exec]]
|
||||||
|
# specify commands via an array of tables
|
||||||
|
[[plugins.exec.commands]]
|
||||||
# the command to run
|
# the command to run
|
||||||
command = "/usr/bin/myothercollector --foo=bar"
|
command = "/usr/bin/myothercollector --foo=bar"
|
||||||
name_suffix = "_myothercollector"
|
|
||||||
|
# name of the command (used as a prefix for measurements)
|
||||||
|
name = "myothercollector"
|
||||||
|
|||||||
@@ -1,11 +1,9 @@
|
|||||||
[[inputs.memcached]]
|
[[plugins.memcached]]
|
||||||
servers = ["192.168.1.1"]
|
servers = ["192.168.1.1"]
|
||||||
namepass = ["metricname1"]
|
|
||||||
namedrop = ["metricname2"]
|
|
||||||
pass = ["some", "strings"]
|
pass = ["some", "strings"]
|
||||||
drop = ["other", "stuff"]
|
drop = ["other", "stuff"]
|
||||||
interval = "5s"
|
interval = "5s"
|
||||||
[inputs.memcached.tagpass]
|
[plugins.memcached.tagpass]
|
||||||
goodtag = ["mytag"]
|
goodtag = ["mytag"]
|
||||||
[inputs.memcached.tagdrop]
|
[plugins.memcached.tagdrop]
|
||||||
badtag = ["othertag"]
|
badtag = ["othertag"]
|
||||||
|
|||||||
@@ -1,2 +1,5 @@
|
|||||||
[[inputs.procstat]]
|
[[plugins.procstat]]
|
||||||
|
[[plugins.procstat.specifications]]
|
||||||
pid_file = "/var/run/grafana-server.pid"
|
pid_file = "/var/run/grafana-server.pid"
|
||||||
|
[[plugins.procstat.specifications]]
|
||||||
|
pid_file = "/var/run/influxdb/influxd.pid"
|
||||||
|
|||||||
132
internal/config/testdata/telegraf-agent.toml
vendored
132
internal/config/testdata/telegraf-agent.toml
vendored
@@ -1,7 +1,7 @@
|
|||||||
# Telegraf configuration
|
# Telegraf configuration
|
||||||
|
|
||||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||||
# declared inputs.
|
# declared plugins.
|
||||||
|
|
||||||
# Even if a plugin has no configuration, it must be declared in here
|
# Even if a plugin has no configuration, it must be declared in here
|
||||||
# to be active. Declaring a plugin means just specifying the name
|
# to be active. Declaring a plugin means just specifying the name
|
||||||
@@ -20,14 +20,21 @@
|
|||||||
# with 'required'. Be sure to edit those to make this configuration work.
|
# with 'required'. Be sure to edit those to make this configuration work.
|
||||||
|
|
||||||
# Tags can also be specified via a normal map, but only one form at a time:
|
# Tags can also be specified via a normal map, but only one form at a time:
|
||||||
[global_tags]
|
[tags]
|
||||||
dc = "us-east-1"
|
# dc = "us-east-1"
|
||||||
|
|
||||||
# Configuration for telegraf agent
|
# Configuration for telegraf agent
|
||||||
[agent]
|
[agent]
|
||||||
# Default data collection interval for all plugins
|
# Default data collection interval for all plugins
|
||||||
interval = "10s"
|
interval = "10s"
|
||||||
|
|
||||||
|
# If utc = false, uses local time (utc is highly recommended)
|
||||||
|
utc = true
|
||||||
|
|
||||||
|
# Precision of writes, valid values are n, u, ms, s, m, and h
|
||||||
|
# note: using second precision greatly helps InfluxDB compression
|
||||||
|
precision = "s"
|
||||||
|
|
||||||
# run telegraf in debug mode
|
# run telegraf in debug mode
|
||||||
debug = false
|
debug = false
|
||||||
|
|
||||||
@@ -39,6 +46,8 @@
|
|||||||
# OUTPUTS #
|
# OUTPUTS #
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
|
[outputs]
|
||||||
|
|
||||||
# Configuration for influxdb server to send metrics to
|
# Configuration for influxdb server to send metrics to
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
# The full HTTP endpoint URL for your InfluxDB instance
|
# The full HTTP endpoint URL for your InfluxDB instance
|
||||||
@@ -49,6 +58,17 @@
|
|||||||
# The target database for metrics. This database must already exist
|
# The target database for metrics. This database must already exist
|
||||||
database = "telegraf" # required.
|
database = "telegraf" # required.
|
||||||
|
|
||||||
|
# Connection timeout (for the connection with InfluxDB), formatted as a string.
|
||||||
|
# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
# If not provided, will default to 0 (no timeout)
|
||||||
|
# timeout = "5s"
|
||||||
|
|
||||||
|
# username = "telegraf"
|
||||||
|
# password = "metricsmetricsmetricsmetrics"
|
||||||
|
|
||||||
|
# Set the user agent for the POSTs (can be useful for log differentiation)
|
||||||
|
# user_agent = "telegraf"
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
urls = ["udp://localhost:8089"]
|
urls = ["udp://localhost:8089"]
|
||||||
database = "udp-telegraf"
|
database = "udp-telegraf"
|
||||||
@@ -68,13 +88,15 @@
|
|||||||
# PLUGINS #
|
# PLUGINS #
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
|
[plugins]
|
||||||
|
|
||||||
# Read Apache status information (mod_status)
|
# Read Apache status information (mod_status)
|
||||||
[[inputs.apache]]
|
[[plugins.apache]]
|
||||||
# An array of Apache status URI to gather stats.
|
# An array of Apache status URI to gather stats.
|
||||||
urls = ["http://localhost/server-status?auto"]
|
urls = ["http://localhost/server-status?auto"]
|
||||||
|
|
||||||
# Read metrics about cpu usage
|
# Read metrics about cpu usage
|
||||||
[[inputs.cpu]]
|
[[plugins.cpu]]
|
||||||
# Whether to report per-cpu stats or not
|
# Whether to report per-cpu stats or not
|
||||||
percpu = true
|
percpu = true
|
||||||
# Whether to report total system cpu stats or not
|
# Whether to report total system cpu stats or not
|
||||||
@@ -83,11 +105,11 @@
|
|||||||
drop = ["cpu_time"]
|
drop = ["cpu_time"]
|
||||||
|
|
||||||
# Read metrics about disk usage by mount point
|
# Read metrics about disk usage by mount point
|
||||||
[[inputs.diskio]]
|
[[plugins.diskio]]
|
||||||
# no configuration
|
# no configuration
|
||||||
|
|
||||||
# Read metrics from one or many disque servers
|
# Read metrics from one or many disque servers
|
||||||
[[inputs.disque]]
|
[[plugins.disque]]
|
||||||
# An array of URI to gather stats about. Specify an ip or hostname
|
# An array of URI to gather stats about. Specify an ip or hostname
|
||||||
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
|
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
|
||||||
# 10.0.0.1:10000, etc.
|
# 10.0.0.1:10000, etc.
|
||||||
@@ -96,7 +118,7 @@
|
|||||||
servers = ["localhost"]
|
servers = ["localhost"]
|
||||||
|
|
||||||
# Read stats from one or more Elasticsearch servers or clusters
|
# Read stats from one or more Elasticsearch servers or clusters
|
||||||
[[inputs.elasticsearch]]
|
[[plugins.elasticsearch]]
|
||||||
# specify a list of one or more Elasticsearch servers
|
# specify a list of one or more Elasticsearch servers
|
||||||
servers = ["http://localhost:9200"]
|
servers = ["http://localhost:9200"]
|
||||||
|
|
||||||
@@ -105,13 +127,17 @@
|
|||||||
local = true
|
local = true
|
||||||
|
|
||||||
# Read flattened metrics from one or more commands that output JSON to stdout
|
# Read flattened metrics from one or more commands that output JSON to stdout
|
||||||
[[inputs.exec]]
|
[[plugins.exec]]
|
||||||
|
# specify commands via an array of tables
|
||||||
|
[[exec.commands]]
|
||||||
# the command to run
|
# the command to run
|
||||||
command = "/usr/bin/mycollector --foo=bar"
|
command = "/usr/bin/mycollector --foo=bar"
|
||||||
name_suffix = "_mycollector"
|
|
||||||
|
# name of the command (used as a prefix for measurements)
|
||||||
|
name = "mycollector"
|
||||||
|
|
||||||
# Read metrics of haproxy, via socket or csv stats page
|
# Read metrics of haproxy, via socket or csv stats page
|
||||||
[[inputs.haproxy]]
|
[[plugins.haproxy]]
|
||||||
# An array of address to gather stats about. Specify an ip on hostname
|
# An array of address to gather stats about. Specify an ip on hostname
|
||||||
# with optional port. ie localhost, 10.10.3.33:1936, etc.
|
# with optional port. ie localhost, 10.10.3.33:1936, etc.
|
||||||
#
|
#
|
||||||
@@ -121,30 +147,33 @@
|
|||||||
# servers = ["socket:/run/haproxy/admin.sock"]
|
# servers = ["socket:/run/haproxy/admin.sock"]
|
||||||
|
|
||||||
# Read flattened metrics from one or more JSON HTTP endpoints
|
# Read flattened metrics from one or more JSON HTTP endpoints
|
||||||
[[inputs.httpjson]]
|
[[plugins.httpjson]]
|
||||||
# a name for the service being polled
|
# Specify services via an array of tables
|
||||||
name = "webserver_stats"
|
[[httpjson.services]]
|
||||||
|
|
||||||
# URL of each server in the service's cluster
|
# a name for the service being polled
|
||||||
servers = [
|
name = "webserver_stats"
|
||||||
"http://localhost:9999/stats/",
|
|
||||||
"http://localhost:9998/stats/",
|
|
||||||
]
|
|
||||||
|
|
||||||
# HTTP method to use (case-sensitive)
|
# URL of each server in the service's cluster
|
||||||
method = "GET"
|
servers = [
|
||||||
|
"http://localhost:9999/stats/",
|
||||||
|
"http://localhost:9998/stats/",
|
||||||
|
]
|
||||||
|
|
||||||
# HTTP parameters (all values must be strings)
|
# HTTP method to use (case-sensitive)
|
||||||
[httpjson.parameters]
|
method = "GET"
|
||||||
event_type = "cpu_spike"
|
|
||||||
threshold = "0.75"
|
# HTTP parameters (all values must be strings)
|
||||||
|
[httpjson.services.parameters]
|
||||||
|
event_type = "cpu_spike"
|
||||||
|
threshold = "0.75"
|
||||||
|
|
||||||
# Read metrics about disk IO by device
|
# Read metrics about disk IO by device
|
||||||
[[inputs.diskio]]
|
[[plugins.io]]
|
||||||
# no configuration
|
# no configuration
|
||||||
|
|
||||||
# read metrics from a Kafka topic
|
# read metrics from a Kafka topic
|
||||||
[[inputs.kafka_consumer]]
|
[[plugins.kafka_consumer]]
|
||||||
# topic(s) to consume
|
# topic(s) to consume
|
||||||
topics = ["telegraf"]
|
topics = ["telegraf"]
|
||||||
# an array of Zookeeper connection strings
|
# an array of Zookeeper connection strings
|
||||||
@@ -157,7 +186,7 @@
|
|||||||
offset = "oldest"
|
offset = "oldest"
|
||||||
|
|
||||||
# Read metrics from a LeoFS Server via SNMP
|
# Read metrics from a LeoFS Server via SNMP
|
||||||
[[inputs.leofs]]
|
[[plugins.leofs]]
|
||||||
# An array of URI to gather stats about LeoFS.
|
# An array of URI to gather stats about LeoFS.
|
||||||
# Specify an ip or hostname with port. ie 127.0.0.1:4020
|
# Specify an ip or hostname with port. ie 127.0.0.1:4020
|
||||||
#
|
#
|
||||||
@@ -165,7 +194,7 @@
|
|||||||
servers = ["127.0.0.1:4021"]
|
servers = ["127.0.0.1:4021"]
|
||||||
|
|
||||||
# Read metrics from local Lustre service on OST, MDS
|
# Read metrics from local Lustre service on OST, MDS
|
||||||
[[inputs.lustre2]]
|
[[plugins.lustre2]]
|
||||||
# An array of /proc globs to search for Lustre stats
|
# An array of /proc globs to search for Lustre stats
|
||||||
# If not specified, the default will work on Lustre 2.5.x
|
# If not specified, the default will work on Lustre 2.5.x
|
||||||
#
|
#
|
||||||
@@ -173,28 +202,19 @@
|
|||||||
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
|
||||||
|
|
||||||
# Read metrics about memory usage
|
# Read metrics about memory usage
|
||||||
[[inputs.mem]]
|
[[plugins.mem]]
|
||||||
# no configuration
|
# no configuration
|
||||||
|
|
||||||
# Read metrics from one or many memcached servers
|
# Read metrics from one or many memcached servers
|
||||||
[[inputs.memcached]]
|
[[plugins.memcached]]
|
||||||
# An array of address to gather stats about. Specify an ip on hostname
|
# An array of address to gather stats about. Specify an ip on hostname
|
||||||
# with optional port. ie localhost, 10.0.0.1:11211, etc.
|
# with optional port. ie localhost, 10.0.0.1:11211, etc.
|
||||||
#
|
#
|
||||||
# If no servers are specified, then localhost is used as the host.
|
# If no servers are specified, then localhost is used as the host.
|
||||||
servers = ["localhost"]
|
servers = ["localhost"]
|
||||||
|
|
||||||
# Telegraf plugin for gathering metrics from N Mesos masters
|
|
||||||
[[inputs.mesos]]
|
|
||||||
# Timeout, in ms.
|
|
||||||
timeout = 100
|
|
||||||
# A list of Mesos masters, default value is localhost:5050.
|
|
||||||
masters = ["localhost:5050"]
|
|
||||||
# Metrics groups to be collected, by default, all enabled.
|
|
||||||
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"]
|
|
||||||
|
|
||||||
# Read metrics from one or many MongoDB servers
|
# Read metrics from one or many MongoDB servers
|
||||||
[[inputs.mongodb]]
|
[[plugins.mongodb]]
|
||||||
# An array of URI to gather stats about. Specify an ip or hostname
|
# An array of URI to gather stats about. Specify an ip or hostname
|
||||||
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
|
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
|
||||||
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||||
@@ -203,7 +223,7 @@
|
|||||||
servers = ["127.0.0.1:27017"]
|
servers = ["127.0.0.1:27017"]
|
||||||
|
|
||||||
# Read metrics from one or many mysql servers
|
# Read metrics from one or many mysql servers
|
||||||
[[inputs.mysql]]
|
[[plugins.mysql]]
|
||||||
# specify servers via a url matching:
|
# specify servers via a url matching:
|
||||||
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||||
# e.g.
|
# e.g.
|
||||||
@@ -214,7 +234,7 @@
|
|||||||
servers = ["localhost"]
|
servers = ["localhost"]
|
||||||
|
|
||||||
# Read metrics about network interface usage
|
# Read metrics about network interface usage
|
||||||
[[inputs.net]]
|
[[plugins.net]]
|
||||||
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
# By default, telegraf gathers stats from any up interface (excluding loopback)
|
||||||
# Setting interfaces will tell it to gather these explicit interfaces,
|
# Setting interfaces will tell it to gather these explicit interfaces,
|
||||||
# regardless of status.
|
# regardless of status.
|
||||||
@@ -222,12 +242,12 @@
|
|||||||
# interfaces = ["eth0", ... ]
|
# interfaces = ["eth0", ... ]
|
||||||
|
|
||||||
# Read Nginx's basic status information (ngx_http_stub_status_module)
|
# Read Nginx's basic status information (ngx_http_stub_status_module)
|
||||||
[[inputs.nginx]]
|
[[plugins.nginx]]
|
||||||
# An array of Nginx stub_status URI to gather stats.
|
# An array of Nginx stub_status URI to gather stats.
|
||||||
urls = ["http://localhost/status"]
|
urls = ["http://localhost/status"]
|
||||||
|
|
||||||
# Ping given url(s) and return statistics
|
# Ping given url(s) and return statistics
|
||||||
[[inputs.ping]]
|
[[plugins.ping]]
|
||||||
# urls to ping
|
# urls to ping
|
||||||
urls = ["www.google.com"] # required
|
urls = ["www.google.com"] # required
|
||||||
# number of pings to send (ping -c <COUNT>)
|
# number of pings to send (ping -c <COUNT>)
|
||||||
@@ -240,7 +260,10 @@
|
|||||||
interface = ""
|
interface = ""
|
||||||
|
|
||||||
# Read metrics from one or many postgresql servers
|
# Read metrics from one or many postgresql servers
|
||||||
[[inputs.postgresql]]
|
[[plugins.postgresql]]
|
||||||
|
# specify servers via an array of tables
|
||||||
|
[[postgresql.servers]]
|
||||||
|
|
||||||
# specify address via a url matching:
|
# specify address via a url matching:
|
||||||
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
||||||
# or a simple string:
|
# or a simple string:
|
||||||
@@ -267,13 +290,14 @@
|
|||||||
# address = "influx@remoteserver"
|
# address = "influx@remoteserver"
|
||||||
|
|
||||||
# Read metrics from one or many prometheus clients
|
# Read metrics from one or many prometheus clients
|
||||||
[[inputs.prometheus]]
|
[[plugins.prometheus]]
|
||||||
# An array of urls to scrape metrics from.
|
# An array of urls to scrape metrics from.
|
||||||
urls = ["http://localhost:9100/metrics"]
|
urls = ["http://localhost:9100/metrics"]
|
||||||
|
|
||||||
# Read metrics from one or many RabbitMQ servers via the management API
|
# Read metrics from one or many RabbitMQ servers via the management API
|
||||||
[[inputs.rabbitmq]]
|
[[plugins.rabbitmq]]
|
||||||
# Specify servers via an array of tables
|
# Specify servers via an array of tables
|
||||||
|
[[rabbitmq.servers]]
|
||||||
# name = "rmq-server-1" # optional tag
|
# name = "rmq-server-1" # optional tag
|
||||||
# url = "http://localhost:15672"
|
# url = "http://localhost:15672"
|
||||||
# username = "guest"
|
# username = "guest"
|
||||||
@@ -284,7 +308,7 @@
|
|||||||
# nodes = ["rabbit@node1", "rabbit@node2"]
|
# nodes = ["rabbit@node1", "rabbit@node2"]
|
||||||
|
|
||||||
# Read metrics from one or many redis servers
|
# Read metrics from one or many redis servers
|
||||||
[[inputs.redis]]
|
[[plugins.redis]]
|
||||||
# An array of URI to gather stats about. Specify an ip or hostname
|
# An array of URI to gather stats about. Specify an ip or hostname
|
||||||
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
|
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
|
||||||
# 10.0.0.1:10000, etc.
|
# 10.0.0.1:10000, etc.
|
||||||
@@ -293,7 +317,7 @@
|
|||||||
servers = ["localhost"]
|
servers = ["localhost"]
|
||||||
|
|
||||||
# Read metrics from one or many RethinkDB servers
|
# Read metrics from one or many RethinkDB servers
|
||||||
[[inputs.rethinkdb]]
|
[[plugins.rethinkdb]]
|
||||||
# An array of URI to gather stats about. Specify an ip or hostname
|
# An array of URI to gather stats about. Specify an ip or hostname
|
||||||
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
|
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
|
||||||
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
|
||||||
@@ -302,9 +326,9 @@
|
|||||||
servers = ["127.0.0.1:28015"]
|
servers = ["127.0.0.1:28015"]
|
||||||
|
|
||||||
# Read metrics about swap memory usage
|
# Read metrics about swap memory usage
|
||||||
[[inputs.swap]]
|
[[plugins.swap]]
|
||||||
# no configuration
|
# no configuration
|
||||||
|
|
||||||
# Read metrics about system load & uptime
|
# Read metrics about system load & uptime
|
||||||
[[inputs.system]]
|
[[plugins.system]]
|
||||||
# no configuration
|
# no configuration
|
||||||
|
|||||||
@@ -1,37 +0,0 @@
|
|||||||
package errchan
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ErrChan struct {
|
|
||||||
C chan error
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns an error channel of max length 'n'
|
|
||||||
// errors can be sent to the ErrChan.C channel, and will be returned when
|
|
||||||
// ErrChan.Error() is called.
|
|
||||||
func New(n int) *ErrChan {
|
|
||||||
return &ErrChan{
|
|
||||||
C: make(chan error, n),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error closes the ErrChan.C channel and returns an error if there are any
|
|
||||||
// non-nil errors, otherwise returns nil.
|
|
||||||
func (e *ErrChan) Error() error {
|
|
||||||
close(e.C)
|
|
||||||
|
|
||||||
var out string
|
|
||||||
for err := range e.C {
|
|
||||||
if err != nil {
|
|
||||||
out += "[" + err.Error() + "], "
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if out != "" {
|
|
||||||
return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,116 +0,0 @@
|
|||||||
package globpath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
)
|
|
||||||
|
|
||||||
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
|
|
||||||
|
|
||||||
type GlobPath struct {
|
|
||||||
path string
|
|
||||||
hasMeta bool
|
|
||||||
hasSuperMeta bool
|
|
||||||
g glob.Glob
|
|
||||||
root string
|
|
||||||
}
|
|
||||||
|
|
||||||
func Compile(path string) (*GlobPath, error) {
|
|
||||||
out := GlobPath{
|
|
||||||
hasMeta: hasMeta(path),
|
|
||||||
hasSuperMeta: hasSuperMeta(path),
|
|
||||||
path: path,
|
|
||||||
}
|
|
||||||
|
|
||||||
// if there are no glob meta characters in the path, don't bother compiling
|
|
||||||
// a glob object or finding the root directory. (see short-circuit in Match)
|
|
||||||
if !out.hasMeta || !out.hasSuperMeta {
|
|
||||||
return &out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Get the root directory for this filepath
|
|
||||||
out.root = findRootDir(path)
|
|
||||||
return &out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GlobPath) Match() map[string]os.FileInfo {
|
|
||||||
if !g.hasMeta {
|
|
||||||
out := make(map[string]os.FileInfo)
|
|
||||||
info, err := os.Stat(g.path)
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
out[g.path] = info
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
if !g.hasSuperMeta {
|
|
||||||
out := make(map[string]os.FileInfo)
|
|
||||||
files, _ := filepath.Glob(g.path)
|
|
||||||
for _, file := range files {
|
|
||||||
info, err := os.Stat(file)
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
out[file] = info
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
return walkFilePath(g.root, g.g)
|
|
||||||
}
|
|
||||||
|
|
||||||
// walk the filepath from the given root and return a list of files that match
|
|
||||||
// the given glob.
|
|
||||||
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
|
|
||||||
matchedFiles := make(map[string]os.FileInfo)
|
|
||||||
walkfn := func(path string, info os.FileInfo, _ error) error {
|
|
||||||
if g.Match(path) {
|
|
||||||
matchedFiles[path] = info
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
filepath.Walk(root, walkfn)
|
|
||||||
return matchedFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
// find the root dir of the given path (could include globs).
|
|
||||||
// ie:
|
|
||||||
// /var/log/telegraf.conf -> /var/log
|
|
||||||
// /home/** -> /home
|
|
||||||
// /home/*/** -> /home
|
|
||||||
// /lib/share/*/*/**.txt -> /lib/share
|
|
||||||
func findRootDir(path string) string {
|
|
||||||
pathItems := strings.Split(path, sepStr)
|
|
||||||
out := sepStr
|
|
||||||
for i, item := range pathItems {
|
|
||||||
if i == len(pathItems)-1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if item == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if hasMeta(item) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
out += item + sepStr
|
|
||||||
}
|
|
||||||
if out != "/" {
|
|
||||||
out = strings.TrimSuffix(out, "/")
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasMeta reports whether path contains any magic glob characters.
|
|
||||||
func hasMeta(path string) bool {
|
|
||||||
return strings.IndexAny(path, "*?[") >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasSuperMeta reports whether path contains any super magic glob characters (**).
|
|
||||||
func hasSuperMeta(path string) bool {
|
|
||||||
return strings.Index(path, "**") >= 0
|
|
||||||
}
|
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
package globpath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCompileAndMatch(t *testing.T) {
|
|
||||||
dir := getTestdataDir()
|
|
||||||
// test super asterisk
|
|
||||||
g1, err := Compile(dir + "/**")
|
|
||||||
require.NoError(t, err)
|
|
||||||
// test single asterisk
|
|
||||||
g2, err := Compile(dir + "/*.log")
|
|
||||||
require.NoError(t, err)
|
|
||||||
// test no meta characters (file exists)
|
|
||||||
g3, err := Compile(dir + "/log1.log")
|
|
||||||
require.NoError(t, err)
|
|
||||||
// test file that doesn't exist
|
|
||||||
g4, err := Compile(dir + "/i_dont_exist.log")
|
|
||||||
require.NoError(t, err)
|
|
||||||
// test super asterisk that doesn't exist
|
|
||||||
g5, err := Compile(dir + "/dir_doesnt_exist/**")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
matches := g1.Match()
|
|
||||||
assert.Len(t, matches, 3)
|
|
||||||
matches = g2.Match()
|
|
||||||
assert.Len(t, matches, 2)
|
|
||||||
matches = g3.Match()
|
|
||||||
assert.Len(t, matches, 1)
|
|
||||||
matches = g4.Match()
|
|
||||||
assert.Len(t, matches, 0)
|
|
||||||
matches = g5.Match()
|
|
||||||
assert.Len(t, matches, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFindRootDir(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
input string
|
|
||||||
output string
|
|
||||||
}{
|
|
||||||
{"/var/log/telegraf.conf", "/var/log"},
|
|
||||||
{"/home/**", "/home"},
|
|
||||||
{"/home/*/**", "/home"},
|
|
||||||
{"/lib/share/*/*/**.txt", "/lib/share"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
actual := findRootDir(test.input)
|
|
||||||
assert.Equal(t, test.output, actual)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTestdataDir() string {
|
|
||||||
_, filename, _, _ := runtime.Caller(1)
|
|
||||||
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
|
|
||||||
}
|
|
||||||
0
internal/globpath/testdata/log1.log
vendored
0
internal/globpath/testdata/log1.log
vendored
0
internal/globpath/testdata/log2.log
vendored
0
internal/globpath/testdata/log2.log
vendored
5
internal/globpath/testdata/test.conf
vendored
5
internal/globpath/testdata/test.conf
vendored
@@ -1,5 +0,0 @@
|
|||||||
# this is a fake testing config file
|
|
||||||
# for testing the filestat plugin
|
|
||||||
|
|
||||||
option1 = "foo"
|
|
||||||
option2 = "bar"
|
|
||||||
@@ -2,29 +2,11 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"math/big"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
|
||||||
|
|
||||||
var (
|
|
||||||
TimeoutErr = errors.New("Command timed out.")
|
|
||||||
|
|
||||||
NotImplementedError = errors.New("not implemented yet")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Duration just wraps time.Duration
|
// Duration just wraps time.Duration
|
||||||
@@ -34,29 +16,51 @@ type Duration struct {
|
|||||||
|
|
||||||
// UnmarshalTOML parses the duration from the TOML config file
|
// UnmarshalTOML parses the duration from the TOML config file
|
||||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||||
var err error
|
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
|
||||||
// Parse string duration, ie, "1s"
|
if err != nil {
|
||||||
d.Duration, err = time.ParseDuration(string(b[1 : len(b)-1]))
|
return err
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// First try parsing as integer seconds
|
d.Duration = dur
|
||||||
sI, err := strconv.ParseInt(string(b), 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
d.Duration = time.Second * time.Duration(sI)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Second try parsing as float seconds
|
|
||||||
sF, err := strconv.ParseFloat(string(b), 64)
|
|
||||||
if err == nil {
|
|
||||||
d.Duration = time.Second * time.Duration(sF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var NotImplementedError = errors.New("not implemented yet")
|
||||||
|
|
||||||
|
type JSONFlattener struct {
|
||||||
|
Fields map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlattenJSON flattens nested maps/interfaces into a fields map
|
||||||
|
func (f *JSONFlattener) FlattenJSON(
|
||||||
|
fieldname string,
|
||||||
|
v interface{},
|
||||||
|
) error {
|
||||||
|
if f.Fields == nil {
|
||||||
|
f.Fields = make(map[string]interface{})
|
||||||
|
}
|
||||||
|
fieldname = strings.Trim(fieldname, "_")
|
||||||
|
switch t := v.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
for k, v := range t {
|
||||||
|
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
f.Fields[fieldname] = t
|
||||||
|
case bool, string, []interface{}:
|
||||||
|
// ignored types
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
|
||||||
|
t, t, fieldname)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// ReadLines reads contents from a file and splits them by new lines.
|
// ReadLines reads contents from a file and splits them by new lines.
|
||||||
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
||||||
func ReadLines(filename string) ([]string, error) {
|
func ReadLines(filename string) ([]string, error) {
|
||||||
@@ -92,141 +96,58 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
|
|||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// RandomString returns a random string of alpha-numeric characters
|
// Glob will test a string pattern, potentially containing globs, against a
|
||||||
func RandomString(n int) string {
|
// subject string. The result is a simple true/false, determining whether or
|
||||||
var bytes = make([]byte, n)
|
// not the glob pattern matched the subject text.
|
||||||
rand.Read(bytes)
|
//
|
||||||
for i, b := range bytes {
|
// Adapted from https://github.com/ryanuber/go-glob/blob/master/glob.go
|
||||||
bytes[i] = alphanum[b%byte(len(alphanum))]
|
// thanks Ryan Uber!
|
||||||
}
|
func Glob(pattern, measurement string) bool {
|
||||||
return string(bytes)
|
// Empty pattern can only match empty subject
|
||||||
}
|
if pattern == "" {
|
||||||
|
return measurement == pattern
|
||||||
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
|
|
||||||
// you must give the full path to the files.
|
|
||||||
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
|
|
||||||
func GetTLSConfig(
|
|
||||||
SSLCert, SSLKey, SSLCA string,
|
|
||||||
InsecureSkipVerify bool,
|
|
||||||
) (*tls.Config, error) {
|
|
||||||
if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
|
|
||||||
return nil, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
t := &tls.Config{
|
// If the pattern _is_ a glob, it matches everything
|
||||||
InsecureSkipVerify: InsecureSkipVerify,
|
if pattern == "*" {
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if SSLCA != "" {
|
parts := strings.Split(pattern, "*")
|
||||||
caCert, err := ioutil.ReadFile(SSLCA)
|
|
||||||
if err != nil {
|
if len(parts) == 1 {
|
||||||
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
|
// No globs in pattern, so test for match
|
||||||
err))
|
return pattern == measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
leadingGlob := strings.HasPrefix(pattern, "*")
|
||||||
|
trailingGlob := strings.HasSuffix(pattern, "*")
|
||||||
|
end := len(parts) - 1
|
||||||
|
|
||||||
|
for i, part := range parts {
|
||||||
|
switch i {
|
||||||
|
case 0:
|
||||||
|
if leadingGlob {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(measurement, part) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case end:
|
||||||
|
if len(measurement) > 0 {
|
||||||
|
return trailingGlob || strings.HasSuffix(measurement, part)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
if !strings.Contains(measurement, part) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
caCertPool := x509.NewCertPool()
|
// Trim evaluated text from measurement as we loop over the pattern.
|
||||||
caCertPool.AppendCertsFromPEM(caCert)
|
idx := strings.Index(measurement, part) + len(part)
|
||||||
t.RootCAs = caCertPool
|
measurement = measurement[idx:]
|
||||||
}
|
}
|
||||||
|
|
||||||
if SSLCert != "" && SSLKey != "" {
|
// All parts of the pattern matched
|
||||||
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
|
return true
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New(fmt.Sprintf(
|
|
||||||
"Could not load TLS client key/certificate from %s:%s: %s",
|
|
||||||
SSLKey, SSLCert, err))
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Certificates = []tls.Certificate{cert}
|
|
||||||
t.BuildNameToCertificate()
|
|
||||||
}
|
|
||||||
|
|
||||||
// will be nil by default if nothing is provided
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SnakeCase converts the given string to snake case following the Golang format:
|
|
||||||
// acronyms are converted to lower-case and preceded by an underscore.
|
|
||||||
func SnakeCase(in string) string {
|
|
||||||
runes := []rune(in)
|
|
||||||
length := len(runes)
|
|
||||||
|
|
||||||
var out []rune
|
|
||||||
for i := 0; i < length; i++ {
|
|
||||||
if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
|
|
||||||
out = append(out, '_')
|
|
||||||
}
|
|
||||||
out = append(out, unicode.ToLower(runes[i]))
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CombinedOutputTimeout runs the given command with the given timeout and
|
|
||||||
// returns the combined output of stdout and stderr.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
c.Stdout = &b
|
|
||||||
c.Stderr = &b
|
|
||||||
if err := c.Start(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err := WaitTimeout(c, timeout)
|
|
||||||
return b.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunTimeout runs the given command with the given timeout.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|
||||||
if err := c.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return WaitTimeout(c, timeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitTimeout waits for the given command to finish with a timeout.
|
|
||||||
// It assumes the command has already been started.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|
||||||
timer := time.NewTimer(timeout)
|
|
||||||
done := make(chan error)
|
|
||||||
go func() { done <- c.Wait() }()
|
|
||||||
select {
|
|
||||||
case err := <-done:
|
|
||||||
timer.Stop()
|
|
||||||
return err
|
|
||||||
case <-timer.C:
|
|
||||||
if err := c.Process.Kill(); err != nil {
|
|
||||||
log.Printf("E! FATAL error killing process: %s", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// wait for the command to return after killing it
|
|
||||||
<-done
|
|
||||||
return TimeoutErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RandomSleep will sleep for a random amount of time up to max.
|
|
||||||
// If the shutdown channel is closed, it will return before it has finished
|
|
||||||
// sleeping.
|
|
||||||
func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
|
||||||
if max == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
maxSleep := big.NewInt(max.Nanoseconds())
|
|
||||||
|
|
||||||
var sleepns int64
|
|
||||||
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
|
|
||||||
sleepns = j.Int64()
|
|
||||||
}
|
|
||||||
|
|
||||||
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
return
|
|
||||||
case <-shutdown:
|
|
||||||
t.Stop()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,133 +1,44 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import "testing"
|
||||||
"os/exec"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
func testGlobMatch(t *testing.T, pattern, subj string) {
|
||||||
)
|
if !Glob(pattern, subj) {
|
||||||
|
t.Errorf("%s should match %s", pattern, subj)
|
||||||
type SnakeTest struct {
|
|
||||||
input string
|
|
||||||
output string
|
|
||||||
}
|
|
||||||
|
|
||||||
var tests = []SnakeTest{
|
|
||||||
{"a", "a"},
|
|
||||||
{"snake", "snake"},
|
|
||||||
{"A", "a"},
|
|
||||||
{"ID", "id"},
|
|
||||||
{"MOTD", "motd"},
|
|
||||||
{"Snake", "snake"},
|
|
||||||
{"SnakeTest", "snake_test"},
|
|
||||||
{"APIResponse", "api_response"},
|
|
||||||
{"SnakeID", "snake_id"},
|
|
||||||
{"SnakeIDGoogle", "snake_id_google"},
|
|
||||||
{"LinuxMOTD", "linux_motd"},
|
|
||||||
{"OMGWTFBBQ", "omgwtfbbq"},
|
|
||||||
{"omg_wtf_bbq", "omg_wtf_bbq"},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSnakeCase(t *testing.T) {
|
|
||||||
for _, test := range tests {
|
|
||||||
if SnakeCase(test.input) != test.output {
|
|
||||||
t.Errorf(`SnakeCase("%s"), wanted "%s", got \%s"`, test.input, test.output, SnakeCase(test.input))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
func testGlobNoMatch(t *testing.T, pattern, subj string) {
|
||||||
sleepbin, _ = exec.LookPath("sleep")
|
if Glob(pattern, subj) {
|
||||||
echobin, _ = exec.LookPath("echo")
|
t.Errorf("%s should not match %s", pattern, subj)
|
||||||
)
|
|
||||||
|
|
||||||
func TestRunTimeout(t *testing.T) {
|
|
||||||
if sleepbin == "" {
|
|
||||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
|
||||||
}
|
}
|
||||||
cmd := exec.Command(sleepbin, "10")
|
|
||||||
start := time.Now()
|
|
||||||
err := RunTimeout(cmd, time.Millisecond*20)
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
|
|
||||||
assert.Equal(t, TimeoutErr, err)
|
|
||||||
// Verify that command gets killed in 20ms, with some breathing room
|
|
||||||
assert.True(t, elapsed < time.Millisecond*75)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCombinedOutputTimeout(t *testing.T) {
|
func TestEmptyPattern(t *testing.T) {
|
||||||
if sleepbin == "" {
|
testGlobMatch(t, "", "")
|
||||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
testGlobNoMatch(t, "", "test")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatternWithoutGlobs(t *testing.T) {
|
||||||
|
testGlobMatch(t, "test", "test")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGlob(t *testing.T) {
|
||||||
|
for _, pattern := range []string{
|
||||||
|
"*test", // Leading glob
|
||||||
|
"this*", // Trailing glob
|
||||||
|
"*is*a*", // Lots of globs
|
||||||
|
"**test**", // Double glob characters
|
||||||
|
"**is**a***test*", // Varying number of globs
|
||||||
|
} {
|
||||||
|
testGlobMatch(t, pattern, "this_is_a_test")
|
||||||
}
|
}
|
||||||
cmd := exec.Command(sleepbin, "10")
|
|
||||||
start := time.Now()
|
|
||||||
_, err := CombinedOutputTimeout(cmd, time.Millisecond*20)
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
|
|
||||||
assert.Equal(t, TimeoutErr, err)
|
for _, pattern := range []string{
|
||||||
// Verify that command gets killed in 20ms, with some breathing room
|
"test*", // Implicit substring match should fail
|
||||||
assert.True(t, elapsed < time.Millisecond*75)
|
"*is", // Partial match should fail
|
||||||
}
|
"*no*", // Globs without a match between them should fail
|
||||||
|
} {
|
||||||
func TestCombinedOutput(t *testing.T) {
|
testGlobNoMatch(t, pattern, "this_is_a_test")
|
||||||
if echobin == "" {
|
|
||||||
t.Skip("'echo' binary not available on OS, skipping.")
|
|
||||||
}
|
}
|
||||||
cmd := exec.Command(echobin, "foo")
|
|
||||||
out, err := CombinedOutputTimeout(cmd, time.Second)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, "foo\n", string(out))
|
|
||||||
}
|
|
||||||
|
|
||||||
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
|
|
||||||
// the same output from a failed command.
|
|
||||||
func TestCombinedOutputError(t *testing.T) {
|
|
||||||
if sleepbin == "" {
|
|
||||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
|
||||||
}
|
|
||||||
cmd := exec.Command(sleepbin, "foo")
|
|
||||||
expected, err := cmd.CombinedOutput()
|
|
||||||
|
|
||||||
cmd2 := exec.Command(sleepbin, "foo")
|
|
||||||
actual, err := CombinedOutputTimeout(cmd2, time.Second)
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunError(t *testing.T) {
|
|
||||||
if sleepbin == "" {
|
|
||||||
t.Skip("'sleep' binary not available on OS, skipping.")
|
|
||||||
}
|
|
||||||
cmd := exec.Command(sleepbin, "foo")
|
|
||||||
err := RunTimeout(cmd, time.Second)
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRandomSleep(t *testing.T) {
|
|
||||||
// test that zero max returns immediately
|
|
||||||
s := time.Now()
|
|
||||||
RandomSleep(time.Duration(0), make(chan struct{}))
|
|
||||||
elapsed := time.Since(s)
|
|
||||||
assert.True(t, elapsed < time.Millisecond)
|
|
||||||
|
|
||||||
// test that max sleep is respected
|
|
||||||
s = time.Now()
|
|
||||||
RandomSleep(time.Millisecond*50, make(chan struct{}))
|
|
||||||
elapsed = time.Since(s)
|
|
||||||
assert.True(t, elapsed < time.Millisecond*100)
|
|
||||||
|
|
||||||
// test that shutdown is respected
|
|
||||||
s = time.Now()
|
|
||||||
shutdown := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
time.Sleep(time.Millisecond * 100)
|
|
||||||
close(shutdown)
|
|
||||||
}()
|
|
||||||
RandomSleep(time.Second, shutdown)
|
|
||||||
elapsed = time.Since(s)
|
|
||||||
assert.True(t, elapsed < time.Millisecond*150)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,59 +0,0 @@
|
|||||||
package limiter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewRateLimiter returns a rate limiter that will will emit from the C
|
|
||||||
// channel only 'n' times every 'rate' seconds.
|
|
||||||
func NewRateLimiter(n int, rate time.Duration) *rateLimiter {
|
|
||||||
r := &rateLimiter{
|
|
||||||
C: make(chan bool),
|
|
||||||
rate: rate,
|
|
||||||
n: n,
|
|
||||||
shutdown: make(chan bool),
|
|
||||||
}
|
|
||||||
r.wg.Add(1)
|
|
||||||
go r.limiter()
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
type rateLimiter struct {
|
|
||||||
C chan bool
|
|
||||||
rate time.Duration
|
|
||||||
n int
|
|
||||||
|
|
||||||
shutdown chan bool
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *rateLimiter) Stop() {
|
|
||||||
close(r.shutdown)
|
|
||||||
r.wg.Wait()
|
|
||||||
close(r.C)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *rateLimiter) limiter() {
|
|
||||||
defer r.wg.Done()
|
|
||||||
ticker := time.NewTicker(r.rate)
|
|
||||||
defer ticker.Stop()
|
|
||||||
counter := 0
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-r.shutdown:
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
counter = 0
|
|
||||||
default:
|
|
||||||
if counter < r.n {
|
|
||||||
select {
|
|
||||||
case r.C <- true:
|
|
||||||
counter++
|
|
||||||
case <-r.shutdown:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,54 +0,0 @@
|
|||||||
package limiter
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRateLimiter(t *testing.T) {
|
|
||||||
r := NewRateLimiter(5, time.Second)
|
|
||||||
ticker := time.NewTicker(time.Millisecond * 75)
|
|
||||||
|
|
||||||
// test that we can only get 5 receives from the rate limiter
|
|
||||||
counter := 0
|
|
||||||
outer:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-r.C:
|
|
||||||
counter++
|
|
||||||
case <-ticker.C:
|
|
||||||
break outer
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, 5, counter)
|
|
||||||
r.Stop()
|
|
||||||
// verify that the Stop function closes the channel.
|
|
||||||
_, ok := <-r.C
|
|
||||||
assert.False(t, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiterMultipleIterations(t *testing.T) {
|
|
||||||
r := NewRateLimiter(5, time.Millisecond*50)
|
|
||||||
ticker := time.NewTicker(time.Millisecond * 250)
|
|
||||||
|
|
||||||
// test that we can get 15 receives from the rate limiter
|
|
||||||
counter := 0
|
|
||||||
outer:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
break outer
|
|
||||||
case <-r.C:
|
|
||||||
counter++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.True(t, counter > 10)
|
|
||||||
r.Stop()
|
|
||||||
// verify that the Stop function closes the channel.
|
|
||||||
_, ok := <-r.C
|
|
||||||
assert.False(t, ok)
|
|
||||||
}
|
|
||||||
@@ -1,227 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/filter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TagFilter is the name of a tag, and the values on which to filter
|
|
||||||
type TagFilter struct {
|
|
||||||
Name string
|
|
||||||
Filter []string
|
|
||||||
filter filter.Filter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter containing drop/pass and tagdrop/tagpass rules
|
|
||||||
type Filter struct {
|
|
||||||
NameDrop []string
|
|
||||||
nameDrop filter.Filter
|
|
||||||
NamePass []string
|
|
||||||
namePass filter.Filter
|
|
||||||
|
|
||||||
FieldDrop []string
|
|
||||||
fieldDrop filter.Filter
|
|
||||||
FieldPass []string
|
|
||||||
fieldPass filter.Filter
|
|
||||||
|
|
||||||
TagDrop []TagFilter
|
|
||||||
TagPass []TagFilter
|
|
||||||
|
|
||||||
TagExclude []string
|
|
||||||
tagExclude filter.Filter
|
|
||||||
TagInclude []string
|
|
||||||
tagInclude filter.Filter
|
|
||||||
|
|
||||||
isActive bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile all Filter lists into filter.Filter objects.
|
|
||||||
func (f *Filter) Compile() error {
|
|
||||||
if len(f.NameDrop) == 0 &&
|
|
||||||
len(f.NamePass) == 0 &&
|
|
||||||
len(f.FieldDrop) == 0 &&
|
|
||||||
len(f.FieldPass) == 0 &&
|
|
||||||
len(f.TagInclude) == 0 &&
|
|
||||||
len(f.TagExclude) == 0 &&
|
|
||||||
len(f.TagPass) == 0 &&
|
|
||||||
len(f.TagDrop) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
f.isActive = true
|
|
||||||
var err error
|
|
||||||
f.nameDrop, err = filter.Compile(f.NameDrop)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'namedrop', %s", err)
|
|
||||||
}
|
|
||||||
f.namePass, err = filter.Compile(f.NamePass)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'namepass', %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f.fieldDrop, err = filter.Compile(f.FieldDrop)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'fielddrop', %s", err)
|
|
||||||
}
|
|
||||||
f.fieldPass, err = filter.Compile(f.FieldPass)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'fieldpass', %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f.tagExclude, err = filter.Compile(f.TagExclude)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'tagexclude', %s", err)
|
|
||||||
}
|
|
||||||
f.tagInclude, err = filter.Compile(f.TagInclude)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, _ := range f.TagDrop {
|
|
||||||
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i, _ := range f.TagPass {
|
|
||||||
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply applies the filter to the given measurement name, fields map, and
|
|
||||||
// tags map. It will return false if the metric should be "filtered out", and
|
|
||||||
// true if the metric should "pass".
|
|
||||||
// It will modify tags in-place if they need to be deleted.
|
|
||||||
func (f *Filter) Apply(
|
|
||||||
measurement string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tags map[string]string,
|
|
||||||
) bool {
|
|
||||||
if !f.isActive {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if the measurement name should pass
|
|
||||||
if !f.shouldNamePass(measurement) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if the tags should pass
|
|
||||||
if !f.shouldTagsPass(tags) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// filter fields
|
|
||||||
for fieldkey, _ := range fields {
|
|
||||||
if !f.shouldFieldPass(fieldkey) {
|
|
||||||
delete(fields, fieldkey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(fields) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// filter tags
|
|
||||||
f.filterTags(tags)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsActive reports whether any filter parameters were configured and
// compiled, i.e. whether Apply will perform any filtering at all.
// (isActive is presumably set by Compile when a filter list is non-empty —
// the setter is outside this view; confirm against Compile.)
func (f *Filter) IsActive() bool {
	return f.isActive
}
|
|
||||||
|
|
||||||
// shouldNamePass returns true if the metric should pass, false if should drop
|
|
||||||
// based on the drop/pass filter parameters
|
|
||||||
func (f *Filter) shouldNamePass(key string) bool {
|
|
||||||
if f.namePass != nil {
|
|
||||||
if f.namePass.Match(key) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.nameDrop != nil {
|
|
||||||
if f.nameDrop.Match(key) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldFieldPass returns true if the metric should pass, false if should drop
|
|
||||||
// based on the drop/pass filter parameters
|
|
||||||
func (f *Filter) shouldFieldPass(key string) bool {
|
|
||||||
if f.fieldPass != nil {
|
|
||||||
if f.fieldPass.Match(key) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.fieldDrop != nil {
|
|
||||||
if f.fieldDrop.Match(key) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldTagsPass returns true if the metric should pass, false if should drop
|
|
||||||
// based on the tagdrop/tagpass filter parameters
|
|
||||||
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
|
||||||
if f.TagPass != nil {
|
|
||||||
for _, pat := range f.TagPass {
|
|
||||||
if pat.filter == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tagval, ok := tags[pat.Name]; ok {
|
|
||||||
if pat.filter.Match(tagval) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.TagDrop != nil {
|
|
||||||
for _, pat := range f.TagDrop {
|
|
||||||
if pat.filter == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tagval, ok := tags[pat.Name]; ok {
|
|
||||||
if pat.filter.Match(tagval) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply TagInclude and TagExclude filters.
|
|
||||||
// modifies the tags map in-place.
|
|
||||||
func (f *Filter) filterTags(tags map[string]string) {
|
|
||||||
if f.tagInclude != nil {
|
|
||||||
for k, _ := range tags {
|
|
||||||
if !f.tagInclude.Match(k) {
|
|
||||||
delete(tags, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.tagExclude != nil {
|
|
||||||
for k, _ := range tags {
|
|
||||||
if f.tagExclude.Match(k) {
|
|
||||||
delete(tags, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,359 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFilter_ApplyEmpty(t *testing.T) {
|
|
||||||
f := Filter{}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
assert.False(t, f.IsActive())
|
|
||||||
|
|
||||||
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_ApplyTagsDontPass(t *testing.T) {
|
|
||||||
filters := []TagFilter{
|
|
||||||
TagFilter{
|
|
||||||
Name: "cpu",
|
|
||||||
Filter: []string{"cpu-*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
f := Filter{
|
|
||||||
TagDrop: filters,
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
assert.True(t, f.IsActive())
|
|
||||||
|
|
||||||
assert.False(t, f.Apply("m",
|
|
||||||
map[string]interface{}{"value": int64(1)},
|
|
||||||
map[string]string{"cpu": "cpu-total"}))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_ApplyDeleteFields(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
FieldDrop: []string{"value"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
assert.True(t, f.IsActive())
|
|
||||||
|
|
||||||
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
|
||||||
assert.True(t, f.Apply("m", fields, nil))
|
|
||||||
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
FieldDrop: []string{"value*"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
assert.True(t, f.IsActive())
|
|
||||||
|
|
||||||
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
|
||||||
assert.False(t, f.Apply("m", fields, nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_Empty(t *testing.T) {
|
|
||||||
f := Filter{}
|
|
||||||
|
|
||||||
measurements := []string{
|
|
||||||
"foo",
|
|
||||||
"bar",
|
|
||||||
"barfoo",
|
|
||||||
"foo_bar",
|
|
||||||
"foo.bar",
|
|
||||||
"foo-bar",
|
|
||||||
"supercalifradjulisticexpialidocious",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range measurements {
|
|
||||||
if !f.shouldFieldPass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to pass", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_NamePass(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
NamePass: []string{"foo*", "cpu_usage_idle"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
passes := []string{
|
|
||||||
"foo",
|
|
||||||
"foo_bar",
|
|
||||||
"foo.bar",
|
|
||||||
"foo-bar",
|
|
||||||
"cpu_usage_idle",
|
|
||||||
}
|
|
||||||
|
|
||||||
drops := []string{
|
|
||||||
"bar",
|
|
||||||
"barfoo",
|
|
||||||
"bar_foo",
|
|
||||||
"cpu_usage_busy",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range passes {
|
|
||||||
if !f.shouldNamePass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to pass", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range drops {
|
|
||||||
if f.shouldNamePass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to drop", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_NameDrop(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
NameDrop: []string{"foo*", "cpu_usage_idle"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
drops := []string{
|
|
||||||
"foo",
|
|
||||||
"foo_bar",
|
|
||||||
"foo.bar",
|
|
||||||
"foo-bar",
|
|
||||||
"cpu_usage_idle",
|
|
||||||
}
|
|
||||||
|
|
||||||
passes := []string{
|
|
||||||
"bar",
|
|
||||||
"barfoo",
|
|
||||||
"bar_foo",
|
|
||||||
"cpu_usage_busy",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range passes {
|
|
||||||
if !f.shouldNamePass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to pass", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range drops {
|
|
||||||
if f.shouldNamePass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to drop", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_FieldPass(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
FieldPass: []string{"foo*", "cpu_usage_idle"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
passes := []string{
|
|
||||||
"foo",
|
|
||||||
"foo_bar",
|
|
||||||
"foo.bar",
|
|
||||||
"foo-bar",
|
|
||||||
"cpu_usage_idle",
|
|
||||||
}
|
|
||||||
|
|
||||||
drops := []string{
|
|
||||||
"bar",
|
|
||||||
"barfoo",
|
|
||||||
"bar_foo",
|
|
||||||
"cpu_usage_busy",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range passes {
|
|
||||||
if !f.shouldFieldPass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to pass", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range drops {
|
|
||||||
if f.shouldFieldPass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to drop", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_FieldDrop(t *testing.T) {
|
|
||||||
f := Filter{
|
|
||||||
FieldDrop: []string{"foo*", "cpu_usage_idle"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
drops := []string{
|
|
||||||
"foo",
|
|
||||||
"foo_bar",
|
|
||||||
"foo.bar",
|
|
||||||
"foo-bar",
|
|
||||||
"cpu_usage_idle",
|
|
||||||
}
|
|
||||||
|
|
||||||
passes := []string{
|
|
||||||
"bar",
|
|
||||||
"barfoo",
|
|
||||||
"bar_foo",
|
|
||||||
"cpu_usage_busy",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range passes {
|
|
||||||
if !f.shouldFieldPass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to pass", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, measurement := range drops {
|
|
||||||
if f.shouldFieldPass(measurement) {
|
|
||||||
t.Errorf("Expected measurement %s to drop", measurement)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_TagPass(t *testing.T) {
|
|
||||||
filters := []TagFilter{
|
|
||||||
TagFilter{
|
|
||||||
Name: "cpu",
|
|
||||||
Filter: []string{"cpu-*"},
|
|
||||||
},
|
|
||||||
TagFilter{
|
|
||||||
Name: "mem",
|
|
||||||
Filter: []string{"mem_free"},
|
|
||||||
}}
|
|
||||||
f := Filter{
|
|
||||||
TagPass: filters,
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
passes := []map[string]string{
|
|
||||||
{"cpu": "cpu-total"},
|
|
||||||
{"cpu": "cpu-0"},
|
|
||||||
{"cpu": "cpu-1"},
|
|
||||||
{"cpu": "cpu-2"},
|
|
||||||
{"mem": "mem_free"},
|
|
||||||
}
|
|
||||||
|
|
||||||
drops := []map[string]string{
|
|
||||||
{"cpu": "cputotal"},
|
|
||||||
{"cpu": "cpu0"},
|
|
||||||
{"cpu": "cpu1"},
|
|
||||||
{"cpu": "cpu2"},
|
|
||||||
{"mem": "mem_used"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tags := range passes {
|
|
||||||
if !f.shouldTagsPass(tags) {
|
|
||||||
t.Errorf("Expected tags %v to pass", tags)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tags := range drops {
|
|
||||||
if f.shouldTagsPass(tags) {
|
|
||||||
t.Errorf("Expected tags %v to drop", tags)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_TagDrop(t *testing.T) {
|
|
||||||
filters := []TagFilter{
|
|
||||||
TagFilter{
|
|
||||||
Name: "cpu",
|
|
||||||
Filter: []string{"cpu-*"},
|
|
||||||
},
|
|
||||||
TagFilter{
|
|
||||||
Name: "mem",
|
|
||||||
Filter: []string{"mem_free"},
|
|
||||||
}}
|
|
||||||
f := Filter{
|
|
||||||
TagDrop: filters,
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
drops := []map[string]string{
|
|
||||||
{"cpu": "cpu-total"},
|
|
||||||
{"cpu": "cpu-0"},
|
|
||||||
{"cpu": "cpu-1"},
|
|
||||||
{"cpu": "cpu-2"},
|
|
||||||
{"mem": "mem_free"},
|
|
||||||
}
|
|
||||||
|
|
||||||
passes := []map[string]string{
|
|
||||||
{"cpu": "cputotal"},
|
|
||||||
{"cpu": "cpu0"},
|
|
||||||
{"cpu": "cpu1"},
|
|
||||||
{"cpu": "cpu2"},
|
|
||||||
{"mem": "mem_used"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tags := range passes {
|
|
||||||
if !f.shouldTagsPass(tags) {
|
|
||||||
t.Errorf("Expected tags %v to pass", tags)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tags := range drops {
|
|
||||||
if f.shouldTagsPass(tags) {
|
|
||||||
t.Errorf("Expected tags %v to drop", tags)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_FilterTagsNoMatches(t *testing.T) {
|
|
||||||
pretags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"mytag": "foobar",
|
|
||||||
}
|
|
||||||
f := Filter{
|
|
||||||
TagExclude: []string{"nomatch"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
f.filterTags(pretags)
|
|
||||||
assert.Equal(t, map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"mytag": "foobar",
|
|
||||||
}, pretags)
|
|
||||||
|
|
||||||
f = Filter{
|
|
||||||
TagInclude: []string{"nomatch"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
f.filterTags(pretags)
|
|
||||||
assert.Equal(t, map[string]string{}, pretags)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilter_FilterTagsMatches(t *testing.T) {
|
|
||||||
pretags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"mytag": "foobar",
|
|
||||||
}
|
|
||||||
f := Filter{
|
|
||||||
TagExclude: []string{"ho*"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
f.filterTags(pretags)
|
|
||||||
assert.Equal(t, map[string]string{
|
|
||||||
"mytag": "foobar",
|
|
||||||
}, pretags)
|
|
||||||
|
|
||||||
pretags = map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"mytag": "foobar",
|
|
||||||
}
|
|
||||||
f = Filter{
|
|
||||||
TagInclude: []string{"my*"},
|
|
||||||
}
|
|
||||||
require.NoError(t, f.Compile())
|
|
||||||
|
|
||||||
f.filterTags(pretags)
|
|
||||||
assert.Equal(t, map[string]string{
|
|
||||||
"mytag": "foobar",
|
|
||||||
}, pretags)
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RunningInput ties an input plugin instance to its name and its
// resolved per-input configuration.
type RunningInput struct {
	Name   string
	Input  telegraf.Input
	Config *InputConfig
}
|
|
||||||
|
|
||||||
// InputConfig containing a name, interval, and filter
type InputConfig struct {
	Name              string            // plugin name
	NameOverride      string            // presumably replaces the measurement name — confirm against the agent code (not visible here)
	MeasurementPrefix string            // NOTE(review): prefix/suffix application happens outside this view
	MeasurementSuffix string
	Tags              map[string]string // extra tags, applied outside this view
	Filter            Filter            // metric/tag/field filtering rules
	Interval          time.Duration     // gather interval for this input
}
|
|
||||||
@@ -1,156 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/internal/buffer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Defaults applied by NewRunningOutput when the caller passes zero values.
const (
	// Default size of metrics batch size.
	DEFAULT_METRIC_BATCH_SIZE = 1000

	// Default number of metrics kept. It should be a multiple of batch size.
	DEFAULT_METRIC_BUFFER_LIMIT = 10000
)
|
|
||||||
|
|
||||||
// RunningOutput contains the output configuration
type RunningOutput struct {
	Name              string          // plugin name, used in log messages
	Output            telegraf.Output // the wrapped output plugin
	Config            *OutputConfig   // per-output filter configuration
	Quiet             bool            // suppresses the informational log.Printf calls
	MetricBufferLimit int             // capacity of failMetrics (metrics kept across failed writes)
	MetricBatchSize   int             // number of metrics per write batch

	metrics     *buffer.Buffer // metrics awaiting their first write attempt
	failMetrics *buffer.Buffer // metrics whose previous write attempt failed
}
|
|
||||||
|
|
||||||
func NewRunningOutput(
|
|
||||||
name string,
|
|
||||||
output telegraf.Output,
|
|
||||||
conf *OutputConfig,
|
|
||||||
batchSize int,
|
|
||||||
bufferLimit int,
|
|
||||||
) *RunningOutput {
|
|
||||||
if bufferLimit == 0 {
|
|
||||||
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
|
|
||||||
}
|
|
||||||
if batchSize == 0 {
|
|
||||||
batchSize = DEFAULT_METRIC_BATCH_SIZE
|
|
||||||
}
|
|
||||||
ro := &RunningOutput{
|
|
||||||
Name: name,
|
|
||||||
metrics: buffer.NewBuffer(batchSize),
|
|
||||||
failMetrics: buffer.NewBuffer(bufferLimit),
|
|
||||||
Output: output,
|
|
||||||
Config: conf,
|
|
||||||
MetricBufferLimit: bufferLimit,
|
|
||||||
MetricBatchSize: batchSize,
|
|
||||||
}
|
|
||||||
return ro
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(metric telegraf.Metric) {
	// Filter any tagexclude/taginclude parameters before adding metric
	if ro.Config.Filter.IsActive() {
		// In order to filter out tags, we need to create a new metric, since
		// metrics are immutable once created.
		name := metric.Name()
		tags := metric.Tags()
		fields := metric.Fields()
		t := metric.Time()
		// Apply mutates the fields/tags maps in-place and reports whether
		// the metric survives at all.
		if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
			return
		}
		// error is not possible if creating from another metric, so ignore.
		metric, _ = telegraf.NewMetric(name, tags, fields, t)
	}

	ro.metrics.Add(metric)
	// Flush eagerly once a full batch has accumulated; a failed batch is
	// parked in failMetrics for retry on the next Write.
	if ro.metrics.Len() == ro.MetricBatchSize {
		batch := ro.metrics.Batch(ro.MetricBatchSize)
		err := ro.write(batch)
		if err != nil {
			ro.failMetrics.Add(batch...)
		}
	}
}
|
|
||||||
|
|
||||||
// Write writes all cached points to this output. Previously-failed metrics
// are retried first, in batches, before the fresh metrics buffer is written.
// Returns the first write error encountered; failed batches are re-queued
// into failMetrics so their order is preserved for the next attempt.
func (ro *RunningOutput) Write() error {
	if !ro.Quiet {
		log.Printf("I! Output [%s] buffer fullness: %d / %d metrics. "+
			"Total gathered metrics: %d. Total dropped metrics: %d.",
			ro.Name,
			ro.failMetrics.Len()+ro.metrics.Len(),
			ro.MetricBufferLimit,
			ro.metrics.Total(),
			ro.metrics.Drops()+ro.failMetrics.Drops())
	}

	var err error
	if !ro.failMetrics.IsEmpty() {
		bufLen := ro.failMetrics.Len()
		// how many batches of failed writes we need to write.
		nBatches := bufLen/ro.MetricBatchSize + 1
		batchSize := ro.MetricBatchSize

		for i := 0; i < nBatches; i++ {
			// If it's the last batch, only grab the metrics that have not had
			// a write attempt already (this is primarily to preserve order).
			// NOTE(review): when bufLen is an exact multiple of
			// MetricBatchSize the last iteration requests a zero-size batch;
			// write() treats an empty batch as a no-op, so this is harmless.
			if i == nBatches-1 {
				batchSize = bufLen % ro.MetricBatchSize
			}
			batch := ro.failMetrics.Batch(batchSize)
			// If we've already failed previous writes, don't bother trying to
			// write to this output again. We are not exiting the loop just so
			// that we can rotate the metrics to preserve order.
			if err == nil {
				err = ro.write(batch)
			}
			if err != nil {
				ro.failMetrics.Add(batch...)
			}
		}
	}

	batch := ro.metrics.Batch(ro.MetricBatchSize)
	// see comment above about not trying to write to an already failed output.
	// if ro.failMetrics is empty then err will always be nil at this point.
	if err == nil {
		err = ro.write(batch)
	}
	if err != nil {
		ro.failMetrics.Add(batch...)
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
|
|
||||||
if metrics == nil || len(metrics) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
start := time.Now()
|
|
||||||
err := ro.Output.Write(metrics)
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
if err == nil {
|
|
||||||
if !ro.Quiet {
|
|
||||||
log.Printf("I! Output [%s] wrote batch of %d metrics in %s\n",
|
|
||||||
ro.Name, len(metrics), elapsed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// OutputConfig containing name and filter
type OutputConfig struct {
	Name   string // plugin name
	Filter Filter // metric/tag/field filtering applied before buffering
}
|
|
||||||
@@ -1,546 +0,0 @@
|
|||||||
package models
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
var first5 = []telegraf.Metric{
|
|
||||||
testutil.TestMetric(101, "metric1"),
|
|
||||||
testutil.TestMetric(101, "metric2"),
|
|
||||||
testutil.TestMetric(101, "metric3"),
|
|
||||||
testutil.TestMetric(101, "metric4"),
|
|
||||||
testutil.TestMetric(101, "metric5"),
|
|
||||||
}
|
|
||||||
|
|
||||||
var next5 = []telegraf.Metric{
|
|
||||||
testutil.TestMetric(101, "metric6"),
|
|
||||||
testutil.TestMetric(101, "metric7"),
|
|
||||||
testutil.TestMetric(101, "metric8"),
|
|
||||||
testutil.TestMetric(101, "metric9"),
|
|
||||||
testutil.TestMetric(101, "metric10"),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Benchmark adding metrics.
|
|
||||||
func BenchmarkRunningOutputAddWrite(b *testing.B) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &perfOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
ro.Quiet = true
|
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
ro.Write()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Benchmark adding metrics.
|
|
||||||
func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &perfOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
ro.Quiet = true
|
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
if n%100 == 0 {
|
|
||||||
ro.Write()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Benchmark adding metrics.
|
|
||||||
func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &perfOutput{}
|
|
||||||
m.failWrite = true
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
ro.Quiet = true
|
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{
			NameDrop: []string{"metric1", "metric2"},
		},
	}
	assert.NoError(t, conf.Filter.Compile())

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 1000, 10000)

	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// Nothing written yet: the batch size (1000) was never reached.
	assert.Len(t, m.Metrics(), 0)

	err := ro.Write()
	assert.NoError(t, err)
	// metric1 and metric2 were dropped; 8 of the 10 metrics remain.
	assert.Len(t, m.Metrics(), 8)
}
|
|
||||||
|
|
||||||
// Test that NameDrop filters without a match do nothing.
|
|
||||||
func TestRunningOutput_PassFilter(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{
|
|
||||||
NameDrop: []string{"metric1000", "foo*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, conf.Filter.Compile())
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that tags are properly included
|
|
||||||
func TestRunningOutput_TagIncludeNoMatch(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{
|
|
||||||
|
|
||||||
TagInclude: []string{"nothing*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, conf.Filter.Compile())
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Empty(t, m.Metrics()[0].Tags())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that tags are properly excluded
|
|
||||||
func TestRunningOutput_TagExcludeMatch(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{
|
|
||||||
|
|
||||||
TagExclude: []string{"tag*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, conf.Filter.Compile())
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Len(t, m.Metrics()[0].Tags(), 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that tags are properly Excluded
|
|
||||||
func TestRunningOutput_TagExcludeNoMatch(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{
|
|
||||||
|
|
||||||
TagExclude: []string{"nothing*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, conf.Filter.Compile())
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that tags are properly included
|
|
||||||
func TestRunningOutput_TagIncludeMatch(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{
|
|
||||||
|
|
||||||
TagInclude: []string{"tag*"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
assert.NoError(t, conf.Filter.Compile())
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(first5[0])
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that we can write metrics with simple default setup.
|
|
||||||
func TestRunningOutputDefault(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that running output doesn't flush until it's full when
|
|
||||||
// FlushBufferWhenFull is set.
|
|
||||||
func TestRunningOutputFlushWhenFull(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 6, 10)
|
|
||||||
|
|
||||||
// Fill buffer to 1 under limit
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// no flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// add one more metric
|
|
||||||
ro.AddMetric(next5[0])
|
|
||||||
// now it flushed
|
|
||||||
assert.Len(t, m.Metrics(), 6)
|
|
||||||
|
|
||||||
// add one more metric and write it manually
|
|
||||||
ro.AddMetric(next5[1])
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 7)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that running output doesn't flush until it's full when
// FlushBufferWhenFull is set, twice.
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
	conf := &OutputConfig{
		Filter: Filter{},
	}

	m := &mockOutput{}
	ro := NewRunningOutput("test", m, conf, 4, 12)

	// Fill buffer past limit twice
	for _, metric := range first5 {
		ro.AddMetric(metric)
	}
	for _, metric := range next5 {
		ro.AddMetric(metric)
	}
	// flushed twice (two full batches of 4); the remaining 2 stay buffered
	assert.Len(t, m.Metrics(), 8)
}
|
|
||||||
|
|
||||||
func TestRunningOutputWriteFail(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
m.failWrite = true
|
|
||||||
ro := NewRunningOutput("test", m, conf, 4, 12)
|
|
||||||
|
|
||||||
// Fill buffer to limit twice
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// manual write fails
|
|
||||||
err := ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
m.failWrite = false
|
|
||||||
err = ro.Write()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Len(t, m.Metrics(), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that the order of points is preserved during a write failure.
|
|
||||||
func TestRunningOutputWriteFailOrder(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
m.failWrite = true
|
|
||||||
ro := NewRunningOutput("test", m, conf, 100, 1000)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// Write fails
|
|
||||||
err := ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
m.failWrite = false
|
|
||||||
// add 5 more metrics
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
err = ro.Write()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify that 10 metrics were written
|
|
||||||
assert.Len(t, m.Metrics(), 10)
|
|
||||||
// Verify that they are in order
|
|
||||||
expected := append(first5, next5...)
|
|
||||||
assert.Equal(t, expected, m.Metrics())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that the order of points is preserved during many write failures.
|
|
||||||
func TestRunningOutputWriteFailOrder2(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
m.failWrite = true
|
|
||||||
ro := NewRunningOutput("test", m, conf, 5, 100)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// Write fails
|
|
||||||
err := ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// Write fails
|
|
||||||
err = ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// Write fails
|
|
||||||
err = ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range next5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// Write fails
|
|
||||||
err = ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
m.failWrite = false
|
|
||||||
err = ro.Write()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify that 10 metrics were written
|
|
||||||
assert.Len(t, m.Metrics(), 20)
|
|
||||||
// Verify that they are in order
|
|
||||||
expected := append(first5, next5...)
|
|
||||||
expected = append(expected, first5...)
|
|
||||||
expected = append(expected, next5...)
|
|
||||||
assert.Equal(t, expected, m.Metrics())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that the order of points is preserved when there is a remainder
|
|
||||||
// of points for the batch.
|
|
||||||
//
|
|
||||||
// ie, with a batch size of 5:
|
|
||||||
//
|
|
||||||
// 1 2 3 4 5 6 <-- order, failed points
|
|
||||||
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
|
|
||||||
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
|
|
||||||
//
|
|
||||||
func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
m.failWrite = true
|
|
||||||
ro := NewRunningOutput("test", m, conf, 5, 1000)
|
|
||||||
|
|
||||||
// add 5 metrics
|
|
||||||
for _, metric := range first5 {
|
|
||||||
ro.AddMetric(metric)
|
|
||||||
}
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// Write fails
|
|
||||||
err := ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
// no successful flush yet
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
// add and attempt to write a single metric:
|
|
||||||
ro.AddMetric(next5[0])
|
|
||||||
err = ro.Write()
|
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
// unset fail and write metrics
|
|
||||||
m.failWrite = false
|
|
||||||
err = ro.Write()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify that 6 metrics were written
|
|
||||||
assert.Len(t, m.Metrics(), 6)
|
|
||||||
// Verify that they are in order
|
|
||||||
expected := append(first5, next5[0])
|
|
||||||
assert.Equal(t, expected, m.Metrics())
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockOutput struct {
|
|
||||||
sync.Mutex
|
|
||||||
|
|
||||||
metrics []telegraf.Metric
|
|
||||||
|
|
||||||
// if true, mock a write failure
|
|
||||||
failWrite bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) Connect() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) Description() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) SampleConfig() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) Write(metrics []telegraf.Metric) error {
|
|
||||||
m.Lock()
|
|
||||||
defer m.Unlock()
|
|
||||||
if m.failWrite {
|
|
||||||
return fmt.Errorf("Failed Write!")
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.metrics == nil {
|
|
||||||
m.metrics = []telegraf.Metric{}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
m.metrics = append(m.metrics, metric)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockOutput) Metrics() []telegraf.Metric {
|
|
||||||
m.Lock()
|
|
||||||
defer m.Unlock()
|
|
||||||
return m.metrics
|
|
||||||
}
|
|
||||||
|
|
||||||
type perfOutput struct {
|
|
||||||
// if true, mock a write failure
|
|
||||||
failWrite bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *perfOutput) Connect() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *perfOutput) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *perfOutput) Description() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *perfOutput) SampleConfig() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *perfOutput) Write(metrics []telegraf.Metric) error {
|
|
||||||
if m.failWrite {
|
|
||||||
return fmt.Errorf("Failed Write!")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,58 +0,0 @@
|
|||||||
package logger
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/influxdata/wlog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// newTelegrafWriter returns a logging-wrapped writer.
|
|
||||||
func newTelegrafWriter(w io.Writer) io.Writer {
|
|
||||||
return &telegrafLog{
|
|
||||||
writer: wlog.NewWriter(w),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type telegrafLog struct {
|
|
||||||
writer io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *telegrafLog) Write(p []byte) (n int, err error) {
|
|
||||||
return t.writer.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetupLogging configures the logging output.
|
|
||||||
// debug will set the log level to DEBUG
|
|
||||||
// quiet will set the log level to ERROR
|
|
||||||
// logfile will direct the logging output to a file. Empty string is
|
|
||||||
// interpreted as stdout. If there is an error opening the file the
|
|
||||||
// logger will fallback to stdout.
|
|
||||||
func SetupLogging(debug, quiet bool, logfile string) {
|
|
||||||
if debug {
|
|
||||||
wlog.SetLevel(wlog.DEBUG)
|
|
||||||
}
|
|
||||||
if quiet {
|
|
||||||
wlog.SetLevel(wlog.ERROR)
|
|
||||||
}
|
|
||||||
|
|
||||||
var oFile *os.File
|
|
||||||
if logfile != "" {
|
|
||||||
if _, err := os.Stat(logfile); os.IsNotExist(err) {
|
|
||||||
if oFile, err = os.Create(logfile); err != nil {
|
|
||||||
log.Printf("E! Unable to create %s (%s), using stdout", logfile, err)
|
|
||||||
oFile = os.Stdout
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil {
|
|
||||||
log.Printf("E! Unable to append to %s (%s), using stdout", logfile, err)
|
|
||||||
oFile = os.Stdout
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
oFile = os.Stdout
|
|
||||||
}
|
|
||||||
|
|
||||||
log.SetOutput(newTelegrafWriter(oFile))
|
|
||||||
}
|
|
||||||
145
metric.go
145
metric.go
@@ -1,145 +0,0 @@
|
|||||||
package telegraf
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/influxdb/client/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValueType is an enumeration of metric types that represent a simple value.
|
|
||||||
type ValueType int
|
|
||||||
|
|
||||||
// Possible values for the ValueType enum.
|
|
||||||
const (
|
|
||||||
_ ValueType = iota
|
|
||||||
Counter
|
|
||||||
Gauge
|
|
||||||
Untyped
|
|
||||||
)
|
|
||||||
|
|
||||||
type Metric interface {
|
|
||||||
// Name returns the measurement name of the metric
|
|
||||||
Name() string
|
|
||||||
|
|
||||||
// Name returns the tags associated with the metric
|
|
||||||
Tags() map[string]string
|
|
||||||
|
|
||||||
// Time return the timestamp for the metric
|
|
||||||
Time() time.Time
|
|
||||||
|
|
||||||
// Type returns the metric type. Can be either telegraf.Gauge or telegraf.Counter
|
|
||||||
Type() ValueType
|
|
||||||
|
|
||||||
// UnixNano returns the unix nano time of the metric
|
|
||||||
UnixNano() int64
|
|
||||||
|
|
||||||
// Fields returns the fields for the metric
|
|
||||||
Fields() map[string]interface{}
|
|
||||||
|
|
||||||
// String returns a line-protocol string of the metric
|
|
||||||
String() string
|
|
||||||
|
|
||||||
// PrecisionString returns a line-protocol string of the metric, at precision
|
|
||||||
PrecisionString(precison string) string
|
|
||||||
|
|
||||||
// Point returns a influxdb client.Point object
|
|
||||||
Point() *client.Point
|
|
||||||
}
|
|
||||||
|
|
||||||
// metric is a wrapper of the influxdb client.Point struct
|
|
||||||
type metric struct {
|
|
||||||
pt *client.Point
|
|
||||||
|
|
||||||
mType ValueType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMetric returns an untyped metric.
|
|
||||||
func NewMetric(
|
|
||||||
name string,
|
|
||||||
tags map[string]string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
t time.Time,
|
|
||||||
) (Metric, error) {
|
|
||||||
pt, err := client.NewPoint(name, tags, fields, t)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &metric{
|
|
||||||
pt: pt,
|
|
||||||
mType: Untyped,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGaugeMetric returns a gauge metric.
|
|
||||||
// Gauge metrics should be used when the metric is can arbitrarily go up and
|
|
||||||
// down. ie, temperature, memory usage, cpu usage, etc.
|
|
||||||
func NewGaugeMetric(
|
|
||||||
name string,
|
|
||||||
tags map[string]string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
t time.Time,
|
|
||||||
) (Metric, error) {
|
|
||||||
pt, err := client.NewPoint(name, tags, fields, t)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &metric{
|
|
||||||
pt: pt,
|
|
||||||
mType: Gauge,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCounterMetric returns a Counter metric.
|
|
||||||
// Counter metrics should be used when the metric being created is an
|
|
||||||
// always-increasing counter. ie, net bytes received, requests served, errors, etc.
|
|
||||||
func NewCounterMetric(
|
|
||||||
name string,
|
|
||||||
tags map[string]string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
t time.Time,
|
|
||||||
) (Metric, error) {
|
|
||||||
pt, err := client.NewPoint(name, tags, fields, t)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &metric{
|
|
||||||
pt: pt,
|
|
||||||
mType: Counter,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Name() string {
|
|
||||||
return m.pt.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Tags() map[string]string {
|
|
||||||
return m.pt.Tags()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Time() time.Time {
|
|
||||||
return m.pt.Time()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Type() ValueType {
|
|
||||||
return m.mType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) UnixNano() int64 {
|
|
||||||
return m.pt.UnixNano()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Fields() map[string]interface{} {
|
|
||||||
return m.pt.Fields()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) String() string {
|
|
||||||
return m.pt.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) PrecisionString(precison string) string {
|
|
||||||
return m.pt.PrecisionString(precison)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *metric) Point() *client.Point {
|
|
||||||
return m.pt
|
|
||||||
}
|
|
||||||
111
metric_test.go
111
metric_test.go
@@ -1,111 +0,0 @@
|
|||||||
package telegraf
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewMetric(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"datacenter": "us-east-1",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
"usage_busy": float64(1),
|
|
||||||
}
|
|
||||||
m, err := NewMetric("cpu", tags, fields, now)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, Untyped, m.Type())
|
|
||||||
assert.Equal(t, tags, m.Tags())
|
|
||||||
assert.Equal(t, fields, m.Fields())
|
|
||||||
assert.Equal(t, "cpu", m.Name())
|
|
||||||
assert.Equal(t, now, m.Time())
|
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewGaugeMetric(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"datacenter": "us-east-1",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
"usage_busy": float64(1),
|
|
||||||
}
|
|
||||||
m, err := NewGaugeMetric("cpu", tags, fields, now)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, Gauge, m.Type())
|
|
||||||
assert.Equal(t, tags, m.Tags())
|
|
||||||
assert.Equal(t, fields, m.Fields())
|
|
||||||
assert.Equal(t, "cpu", m.Name())
|
|
||||||
assert.Equal(t, now, m.Time())
|
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewCounterMetric(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
"datacenter": "us-east-1",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
"usage_busy": float64(1),
|
|
||||||
}
|
|
||||||
m, err := NewCounterMetric("cpu", tags, fields, now)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, Counter, m.Type())
|
|
||||||
assert.Equal(t, tags, m.Tags())
|
|
||||||
assert.Equal(t, fields, m.Fields())
|
|
||||||
assert.Equal(t, "cpu", m.Name())
|
|
||||||
assert.Equal(t, now, m.Time())
|
|
||||||
assert.Equal(t, now.UnixNano(), m.UnixNano())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewMetricString(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": float64(99),
|
|
||||||
}
|
|
||||||
m, err := NewMetric("cpu", tags, fields, now)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
|
|
||||||
now.UnixNano())
|
|
||||||
assert.Equal(t, lineProto, m.String())
|
|
||||||
|
|
||||||
lineProtoPrecision := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d",
|
|
||||||
now.Unix())
|
|
||||||
assert.Equal(t, lineProtoPrecision, m.PrecisionString("s"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewMetricFailNaN(t *testing.T) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
tags := map[string]string{
|
|
||||||
"host": "localhost",
|
|
||||||
}
|
|
||||||
fields := map[string]interface{}{
|
|
||||||
"usage_idle": math.NaN(),
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := NewMetric("cpu", tags, fields, now)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
16
outputs/all/all.go
Normal file
16
outputs/all/all.go
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
package all
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/amon"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/amqp"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/datadog"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/influxdb"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/kafka"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/kinesis"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/librato"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/mqtt"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/nsq"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/opentsdb"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/prometheus_client"
|
||||||
|
_ "github.com/influxdb/telegraf/outputs/riemann"
|
||||||
|
)
|
||||||
@@ -8,9 +8,9 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdb/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
"github.com/influxdb/telegraf/outputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Amon struct {
|
type Amon struct {
|
||||||
@@ -22,13 +22,13 @@ type Amon struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## Amon Server Key
|
# Amon Server Key
|
||||||
server_key = "my-server-key" # required.
|
server_key = "my-server-key" # required.
|
||||||
|
|
||||||
## Amon Instance URL
|
# Amon Instance URL
|
||||||
amon_instance = "https://youramoninstance" # required
|
amon_instance = "https://youramoninstance" # required
|
||||||
|
|
||||||
## Connection timeout.
|
# Connection timeout.
|
||||||
# timeout = "5s"
|
# timeout = "5s"
|
||||||
`
|
`
|
||||||
|
|
||||||
@@ -38,7 +38,7 @@ type TimeSeries struct {
|
|||||||
|
|
||||||
type Metric struct {
|
type Metric struct {
|
||||||
Metric string `json:"metric"`
|
Metric string `json:"metric"`
|
||||||
Points [1]Point `json:"metrics"`
|
Points [1]Point `json:"points"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Point [2]float64
|
type Point [2]float64
|
||||||
@@ -53,17 +53,17 @@ func (a *Amon) Connect() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Amon) Write(metrics []telegraf.Metric) error {
|
func (a *Amon) Write(points []*client.Point) error {
|
||||||
if len(metrics) == 0 {
|
if len(points) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
ts := TimeSeries{}
|
ts := TimeSeries{}
|
||||||
tempSeries := []*Metric{}
|
tempSeries := []*Metric{}
|
||||||
metricCounter := 0
|
metricCounter := 0
|
||||||
|
|
||||||
for _, m := range metrics {
|
for _, pt := range points {
|
||||||
mname := strings.Replace(m.Name(), "_", ".", -1)
|
mname := strings.Replace(pt.Name(), "_", ".", -1)
|
||||||
if amonPts, err := buildMetrics(m); err == nil {
|
if amonPts, err := buildPoints(pt); err == nil {
|
||||||
for fieldName, amonPt := range amonPts {
|
for fieldName, amonPt := range amonPts {
|
||||||
metric := &Metric{
|
metric := &Metric{
|
||||||
Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
|
Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
|
||||||
@@ -73,7 +73,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error {
|
|||||||
metricCounter++
|
metricCounter++
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("I! unable to build Metric for %s, skipping\n", m.Name())
|
log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -115,17 +115,17 @@ func (a *Amon) authenticatedUrl() string {
|
|||||||
return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
|
return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
|
func buildPoints(pt *client.Point) (map[string]Point, error) {
|
||||||
ms := make(map[string]Point)
|
pts := make(map[string]Point)
|
||||||
for k, v := range m.Fields() {
|
for k, v := range pt.Fields() {
|
||||||
var p Point
|
var p Point
|
||||||
if err := p.setValue(v); err != nil {
|
if err := p.setValue(v); err != nil {
|
||||||
return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
|
return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
|
||||||
}
|
}
|
||||||
p[0] = float64(m.Time().Unix())
|
p[0] = float64(pt.Time().Unix())
|
||||||
ms[k] = p
|
pts[k] = p
|
||||||
}
|
}
|
||||||
return ms, nil
|
return pts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Point) setValue(v interface{}) error {
|
func (p *Point) setValue(v interface{}) error {
|
||||||
@@ -151,7 +151,7 @@ func (a *Amon) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
outputs.Add("amon", func() telegraf.Output {
|
outputs.Add("amon", func() outputs.Output {
|
||||||
return &Amon{}
|
return &Amon{}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -6,19 +6,19 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBuildPoint(t *testing.T) {
|
func TestBuildPoint(t *testing.T) {
|
||||||
var tagtests = []struct {
|
var tagtests = []struct {
|
||||||
ptIn telegraf.Metric
|
ptIn *client.Point
|
||||||
outPt Point
|
outPt Point
|
||||||
err error
|
err error
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
testutil.TestMetric(float64(0.0), "testpt"),
|
testutil.TestPoint(float64(0.0)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
0.0,
|
0.0,
|
||||||
@@ -26,7 +26,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(float64(1.0), "testpt"),
|
testutil.TestPoint(float64(1.0)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
1.0,
|
1.0,
|
||||||
@@ -34,7 +34,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(int(10), "testpt"),
|
testutil.TestPoint(int(10)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
10.0,
|
10.0,
|
||||||
@@ -42,7 +42,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(int32(112345), "testpt"),
|
testutil.TestPoint(int32(112345)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
112345.0,
|
112345.0,
|
||||||
@@ -50,7 +50,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(int64(112345), "testpt"),
|
testutil.TestPoint(int64(112345)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
112345.0,
|
112345.0,
|
||||||
@@ -58,7 +58,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(float32(11234.5), "testpt"),
|
testutil.TestPoint(float32(11234.5)),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
11234.5,
|
11234.5,
|
||||||
@@ -66,7 +66,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric("11234.5", "testpt"),
|
testutil.TestPoint("11234.5"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
11234.5,
|
11234.5,
|
||||||
@@ -75,16 +75,15 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tagtests {
|
for _, tt := range tagtests {
|
||||||
pt, err := buildMetrics(tt.ptIn)
|
pt, err := buildPoint(tt.ptIn)
|
||||||
if err != nil && tt.err == nil {
|
if err != nil && tt.err == nil {
|
||||||
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
||||||
}
|
}
|
||||||
if tt.err != nil && err == nil {
|
if tt.err != nil && err == nil {
|
||||||
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
|
if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
|
||||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
|
||||||
tt.ptIn.Name(), tt.outPt, pt["value"])
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
161
outputs/amqp/amqp.go
Normal file
161
outputs/amqp/amqp.go
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
package amqp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
"github.com/streadway/amqp"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AMQP struct {
|
||||||
|
// AMQP brokers to send metrics to
|
||||||
|
URL string
|
||||||
|
// AMQP exchange
|
||||||
|
Exchange string
|
||||||
|
// Routing Key Tag
|
||||||
|
RoutingTag string `toml:"routing_tag"`
|
||||||
|
// InfluxDB database
|
||||||
|
Database string
|
||||||
|
// InfluxDB retention policy
|
||||||
|
RetentionPolicy string
|
||||||
|
// InfluxDB precision
|
||||||
|
Precision string
|
||||||
|
|
||||||
|
channel *amqp.Channel
|
||||||
|
sync.Mutex
|
||||||
|
headers amqp.Table
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
DefaultRetentionPolicy = "default"
|
||||||
|
DefaultDatabase = "telegraf"
|
||||||
|
DefaultPrecision = "s"
|
||||||
|
)
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# AMQP url
|
||||||
|
url = "amqp://localhost:5672/influxdb"
|
||||||
|
# AMQP exchange
|
||||||
|
exchange = "telegraf"
|
||||||
|
# Telegraf tag to use as a routing key
|
||||||
|
# ie, if this tag exists, it's value will be used as the routing key
|
||||||
|
routing_tag = "host"
|
||||||
|
|
||||||
|
# InfluxDB retention policy
|
||||||
|
#retention_policy = "default"
|
||||||
|
# InfluxDB database
|
||||||
|
#database = "telegraf"
|
||||||
|
# InfluxDB precision
|
||||||
|
#precision = "s"
|
||||||
|
`
|
||||||
|
|
||||||
|
func (q *AMQP) Connect() error {
|
||||||
|
q.Lock()
|
||||||
|
defer q.Unlock()
|
||||||
|
|
||||||
|
q.headers = amqp.Table{
|
||||||
|
"precision": q.Precision,
|
||||||
|
"database": q.Database,
|
||||||
|
"retention_policy": q.RetentionPolicy,
|
||||||
|
}
|
||||||
|
|
||||||
|
connection, err := amqp.Dial(q.URL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
channel, err := connection.Channel()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to open a channel: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = channel.ExchangeDeclare(
|
||||||
|
q.Exchange, // name
|
||||||
|
"topic", // type
|
||||||
|
true, // durable
|
||||||
|
false, // delete when unused
|
||||||
|
false, // internal
|
||||||
|
false, // no-wait
|
||||||
|
nil, // arguments
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to declare an exchange: %s", err)
|
||||||
|
}
|
||||||
|
q.channel = channel
|
||||||
|
go func() {
|
||||||
|
log.Printf("Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error)))
|
||||||
|
log.Printf("Trying to reconnect")
|
||||||
|
for err := q.Connect(); err != nil; err = q.Connect() {
|
||||||
|
log.Println(err)
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *AMQP) Close() error {
|
||||||
|
return q.channel.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *AMQP) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *AMQP) Description() string {
|
||||||
|
return "Configuration for the AMQP server to send metrics to"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *AMQP) Write(points []*client.Point) error {
|
||||||
|
q.Lock()
|
||||||
|
defer q.Unlock()
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var outbuf = make(map[string][][]byte)
|
||||||
|
|
||||||
|
for _, p := range points {
|
||||||
|
// Combine tags from Point and BatchPoints and grab the resulting
|
||||||
|
// line-protocol output string to write to AMQP
|
||||||
|
var value, key string
|
||||||
|
value = p.String()
|
||||||
|
|
||||||
|
if q.RoutingTag != "" {
|
||||||
|
if h, ok := p.Tags()[q.RoutingTag]; ok {
|
||||||
|
key = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
outbuf[key] = append(outbuf[key], []byte(value))
|
||||||
|
|
||||||
|
}
|
||||||
|
for key, buf := range outbuf {
|
||||||
|
err := q.channel.Publish(
|
||||||
|
q.Exchange, // exchange
|
||||||
|
key, // routing key
|
||||||
|
false, // mandatory
|
||||||
|
false, // immediate
|
||||||
|
amqp.Publishing{
|
||||||
|
Headers: q.headers,
|
||||||
|
ContentType: "text/plain",
|
||||||
|
Body: bytes.Join(buf, []byte("\n")),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("FAILED to send amqp message: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("amqp", func() outputs.Output {
|
||||||
|
return &AMQP{
|
||||||
|
Database: DefaultDatabase,
|
||||||
|
Precision: DefaultPrecision,
|
||||||
|
RetentionPolicy: DefaultRetentionPolicy,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -3,8 +3,7 @@ package amqp
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/serializers"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -14,11 +13,9 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var url = "amqp://" + testutil.GetLocalHost() + ":5672/"
|
var url = "amqp://" + testutil.GetLocalHost() + ":5672/"
|
||||||
s, _ := serializers.NewInfluxSerializer()
|
|
||||||
q := &AMQP{
|
q := &AMQP{
|
||||||
URL: url,
|
URL: url,
|
||||||
Exchange: "telegraf_test",
|
Exchange: "telegraf_test",
|
||||||
serializer: s,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that we can connect to the AMQP broker
|
// Verify that we can connect to the AMQP broker
|
||||||
@@ -26,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify that we can successfully write data to the amqp broker
|
// Verify that we can successfully write data to the amqp broker
|
||||||
err = q.Write(testutil.MockMetrics())
|
err = q.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
@@ -8,10 +8,11 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdb/telegraf/internal"
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
"github.com/influxdb/telegraf/outputs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Datadog struct {
|
type Datadog struct {
|
||||||
@@ -23,10 +24,10 @@ type Datadog struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## Datadog API key
|
# Datadog API key
|
||||||
apikey = "my-secret-key" # required.
|
apikey = "my-secret-key" # required.
|
||||||
|
|
||||||
## Connection timeout.
|
# Connection timeout.
|
||||||
# timeout = "5s"
|
# timeout = "5s"
|
||||||
`
|
`
|
||||||
|
|
||||||
@@ -61,38 +62,27 @@ func (d *Datadog) Connect() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Datadog) Write(metrics []telegraf.Metric) error {
|
func (d *Datadog) Write(points []*client.Point) error {
|
||||||
if len(metrics) == 0 {
|
if len(points) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
ts := TimeSeries{}
|
ts := TimeSeries{}
|
||||||
tempSeries := []*Metric{}
|
tempSeries := []*Metric{}
|
||||||
metricCounter := 0
|
metricCounter := 0
|
||||||
|
|
||||||
for _, m := range metrics {
|
for _, pt := range points {
|
||||||
if dogMs, err := buildMetrics(m); err == nil {
|
mname := strings.Replace(pt.Name(), "_", ".", -1)
|
||||||
for fieldName, dogM := range dogMs {
|
if amonPts, err := buildPoints(pt); err == nil {
|
||||||
// name of the datadog measurement
|
for fieldName, amonPt := range amonPts {
|
||||||
var dname string
|
|
||||||
if fieldName == "value" {
|
|
||||||
// adding .value seems redundant here
|
|
||||||
dname = m.Name()
|
|
||||||
} else {
|
|
||||||
dname = m.Name() + "." + fieldName
|
|
||||||
}
|
|
||||||
var host string
|
|
||||||
host, _ = m.Tags()["host"]
|
|
||||||
metric := &Metric{
|
metric := &Metric{
|
||||||
Metric: dname,
|
Metric: mname + strings.Replace(fieldName, "_", ".", -1),
|
||||||
Tags: buildTags(m.Tags()),
|
|
||||||
Host: host,
|
|
||||||
}
|
}
|
||||||
metric.Points[0] = dogM
|
metric.Points[0] = amonPt
|
||||||
tempSeries = append(tempSeries, metric)
|
tempSeries = append(tempSeries, metric)
|
||||||
metricCounter++
|
metricCounter++
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("I! unable to build Metric for %s, skipping\n", m.Name())
|
log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -136,26 +126,23 @@ func (d *Datadog) authenticatedUrl() string {
|
|||||||
return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
|
return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
|
func buildPoints(pt *client.Point) (map[string]Point, error) {
|
||||||
ms := make(map[string]Point)
|
pts := make(map[string]Point)
|
||||||
for k, v := range m.Fields() {
|
for k, v := range pt.Fields() {
|
||||||
if !verifyValue(v) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var p Point
|
var p Point
|
||||||
if err := p.setValue(v); err != nil {
|
if err := p.setValue(v); err != nil {
|
||||||
return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
|
return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
|
||||||
}
|
}
|
||||||
p[0] = float64(m.Time().Unix())
|
p[0] = float64(pt.Time().Unix())
|
||||||
ms[k] = p
|
pts[k] = p
|
||||||
}
|
}
|
||||||
return ms, nil
|
return pts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildTags(mTags map[string]string) []string {
|
func buildTags(ptTags map[string]string) []string {
|
||||||
tags := make([]string, len(mTags))
|
tags := make([]string, len(ptTags))
|
||||||
index := 0
|
index := 0
|
||||||
for k, v := range mTags {
|
for k, v := range ptTags {
|
||||||
tags[index] = fmt.Sprintf("%s:%s", k, v)
|
tags[index] = fmt.Sprintf("%s:%s", k, v)
|
||||||
index += 1
|
index += 1
|
||||||
}
|
}
|
||||||
@@ -163,14 +150,6 @@ func buildTags(mTags map[string]string) []string {
|
|||||||
return tags
|
return tags
|
||||||
}
|
}
|
||||||
|
|
||||||
func verifyValue(v interface{}) bool {
|
|
||||||
switch v.(type) {
|
|
||||||
case string:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Point) setValue(v interface{}) error {
|
func (p *Point) setValue(v interface{}) error {
|
||||||
switch d := v.(type) {
|
switch d := v.(type) {
|
||||||
case int:
|
case int:
|
||||||
@@ -194,7 +173,7 @@ func (d *Datadog) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
outputs.Add("datadog", func() telegraf.Output {
|
outputs.Add("datadog", func() outputs.Output {
|
||||||
return NewDatadog(datadog_api)
|
return NewDatadog(datadog_api)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -9,9 +9,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -38,7 +38,7 @@ func TestUriOverride(t *testing.T) {
|
|||||||
d.Apikey = "123456"
|
d.Apikey = "123456"
|
||||||
err := d.Connect()
|
err := d.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = d.Write(testutil.MockMetrics())
|
err = d.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,7 +57,7 @@ func TestBadStatusCode(t *testing.T) {
|
|||||||
d.Apikey = "123456"
|
d.Apikey = "123456"
|
||||||
err := d.Connect()
|
err := d.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = d.Write(testutil.MockMetrics())
|
err = d.Write(testutil.MockBatchPoints().Points())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("error expected but none returned")
|
t.Errorf("error expected but none returned")
|
||||||
} else {
|
} else {
|
||||||
@@ -100,12 +100,12 @@ func TestBuildTags(t *testing.T) {
|
|||||||
|
|
||||||
func TestBuildPoint(t *testing.T) {
|
func TestBuildPoint(t *testing.T) {
|
||||||
var tagtests = []struct {
|
var tagtests = []struct {
|
||||||
ptIn telegraf.Metric
|
ptIn *client.Point
|
||||||
outPt Point
|
outPt Point
|
||||||
err error
|
err error
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
testutil.TestMetric(0.0, "test1"),
|
testutil.TestPoint(0.0, "test1"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
0.0,
|
0.0,
|
||||||
@@ -113,7 +113,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(1.0, "test2"),
|
testutil.TestPoint(1.0, "test2"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
1.0,
|
1.0,
|
||||||
@@ -121,7 +121,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(10, "test3"),
|
testutil.TestPoint(10, "test3"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
10.0,
|
10.0,
|
||||||
@@ -129,7 +129,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(int32(112345), "test4"),
|
testutil.TestPoint(int32(112345), "test4"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
112345.0,
|
112345.0,
|
||||||
@@ -137,7 +137,7 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(int64(112345), "test5"),
|
testutil.TestPoint(int64(112345), "test5"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
112345.0,
|
112345.0,
|
||||||
@@ -145,47 +145,32 @@ func TestBuildPoint(t *testing.T) {
|
|||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
testutil.TestMetric(float32(11234.5), "test6"),
|
testutil.TestPoint(float32(11234.5), "test6"),
|
||||||
Point{
|
Point{
|
||||||
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
11234.5,
|
11234.5,
|
||||||
},
|
},
|
||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint("11234.5", "test7"),
|
||||||
|
Point{
|
||||||
|
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
|
||||||
|
11234.5,
|
||||||
|
},
|
||||||
|
fmt.Errorf("unable to extract value from Fields, undeterminable type"),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tagtests {
|
for _, tt := range tagtests {
|
||||||
pt, err := buildMetrics(tt.ptIn)
|
pt, err := buildPoint(tt.ptIn)
|
||||||
if err != nil && tt.err == nil {
|
if err != nil && tt.err == nil {
|
||||||
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
|
||||||
}
|
}
|
||||||
if tt.err != nil && err == nil {
|
if tt.err != nil && err == nil {
|
||||||
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil {
|
if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
|
||||||
t.Errorf("%s: \nexpected %+v\ngot %+v\n",
|
t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
|
||||||
tt.ptIn.Name(), tt.outPt, pt["value"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVerifyValue(t *testing.T) {
|
|
||||||
var tagtests = []struct {
|
|
||||||
ptIn telegraf.Metric
|
|
||||||
validMetric bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
testutil.TestMetric(float32(11234.5), "test1"),
|
|
||||||
true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
testutil.TestMetric("11234.5", "test2"),
|
|
||||||
false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tagtests {
|
|
||||||
ok := verifyValue(tt.ptIn.Fields()["value"])
|
|
||||||
if tt.validMetric != ok {
|
|
||||||
t.Errorf("%s: verification failed\n", tt.ptIn.Name())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
12
outputs/influxdb/README.md
Normal file
12
outputs/influxdb/README.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# InfluxDB Output Plugin
|
||||||
|
|
||||||
|
This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
|
||||||
|
|
||||||
|
Required parameters:
|
||||||
|
|
||||||
|
* `urls`: List of strings, this is for InfluxDB clustering
|
||||||
|
support. On each flush interval, Telegraf will randomly choose one of the urls
|
||||||
|
to write to. Each URL should start with either `http://` or `udp://`
|
||||||
|
* `database`: The name of the database to write to.
|
||||||
|
|
||||||
|
|
||||||
162
outputs/influxdb/influxdb.go
Normal file
162
outputs/influxdb/influxdb.go
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
package influxdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/rand"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/internal"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type InfluxDB struct {
|
||||||
|
// URL is only for backwards compatability
|
||||||
|
URL string
|
||||||
|
URLs []string `toml:"urls"`
|
||||||
|
Username string
|
||||||
|
Password string
|
||||||
|
Database string
|
||||||
|
UserAgent string
|
||||||
|
Precision string
|
||||||
|
Timeout internal.Duration
|
||||||
|
UDPPayload int `toml:"udp_payload"`
|
||||||
|
|
||||||
|
conns []client.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
||||||
|
# Multiple urls can be specified but it is assumed that they are part of the same
|
||||||
|
# cluster, this means that only ONE of the urls will be written to each interval.
|
||||||
|
# urls = ["udp://localhost:8089"] # UDP endpoint example
|
||||||
|
urls = ["http://localhost:8086"] # required
|
||||||
|
# The target database for metrics (telegraf will create it if not exists)
|
||||||
|
database = "telegraf" # required
|
||||||
|
# Precision of writes, valid values are n, u, ms, s, m, and h
|
||||||
|
# note: using second precision greatly helps InfluxDB compression
|
||||||
|
precision = "s"
|
||||||
|
|
||||||
|
# Connection timeout (for the connection with InfluxDB), formatted as a string.
|
||||||
|
# If not provided, will default to 0 (no timeout)
|
||||||
|
# timeout = "5s"
|
||||||
|
# username = "telegraf"
|
||||||
|
# password = "metricsmetricsmetricsmetrics"
|
||||||
|
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
||||||
|
# user_agent = "telegraf"
|
||||||
|
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
||||||
|
# udp_payload = 512
|
||||||
|
`
|
||||||
|
|
||||||
|
func (i *InfluxDB) Connect() error {
|
||||||
|
var urls []string
|
||||||
|
for _, u := range i.URLs {
|
||||||
|
urls = append(urls, u)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backward-compatability with single Influx URL config files
|
||||||
|
// This could eventually be removed in favor of specifying the urls as a list
|
||||||
|
if i.URL != "" {
|
||||||
|
urls = append(urls, i.URL)
|
||||||
|
}
|
||||||
|
|
||||||
|
var conns []client.Client
|
||||||
|
for _, u := range urls {
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(u, "udp"):
|
||||||
|
parsed_url, err := url.Parse(u)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if i.UDPPayload == 0 {
|
||||||
|
i.UDPPayload = client.UDPPayloadSize
|
||||||
|
}
|
||||||
|
c, err := client.NewUDPClient(client.UDPConfig{
|
||||||
|
Addr: parsed_url.Host,
|
||||||
|
PayloadSize: i.UDPPayload,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
conns = append(conns, c)
|
||||||
|
default:
|
||||||
|
// If URL doesn't start with "udp", assume HTTP client
|
||||||
|
c, err := client.NewHTTPClient(client.HTTPConfig{
|
||||||
|
Addr: u,
|
||||||
|
Username: i.Username,
|
||||||
|
Password: i.Password,
|
||||||
|
UserAgent: i.UserAgent,
|
||||||
|
Timeout: i.Timeout.Duration,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create Database if it doesn't exist
|
||||||
|
_, e := c.Query(client.Query{
|
||||||
|
Command: fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", i.Database),
|
||||||
|
})
|
||||||
|
|
||||||
|
if e != nil {
|
||||||
|
log.Println("Database creation failed: " + e.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
conns = append(conns, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
i.conns = conns
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InfluxDB) Close() error {
|
||||||
|
// InfluxDB client does not provide a Close() function
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InfluxDB) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *InfluxDB) Description() string {
|
||||||
|
return "Configuration for influxdb server to send metrics to"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Choose a random server in the cluster to write to until a successful write
|
||||||
|
// occurs, logging each unsuccessful. If all servers fail, return error.
|
||||||
|
func (i *InfluxDB) Write(points []*client.Point) error {
|
||||||
|
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
|
||||||
|
Database: i.Database,
|
||||||
|
Precision: i.Precision,
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, point := range points {
|
||||||
|
bp.AddPoint(point)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This will get set to nil if a successful write occurs
|
||||||
|
err := errors.New("Could not write to any InfluxDB server in cluster")
|
||||||
|
|
||||||
|
p := rand.Perm(len(i.conns))
|
||||||
|
for _, n := range p {
|
||||||
|
if e := i.conns[n].Write(bp); e != nil {
|
||||||
|
log.Println("ERROR: " + e.Error())
|
||||||
|
} else {
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("influxdb", func() outputs.Output {
|
||||||
|
return &InfluxDB{}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -18,7 +18,7 @@ func TestUDPInflux(t *testing.T) {
|
|||||||
|
|
||||||
err := i.Connect()
|
err := i.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = i.Write(testutil.MockMetrics())
|
err = i.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -36,6 +36,6 @@ func TestHTTPInflux(t *testing.T) {
|
|||||||
|
|
||||||
err := i.Connect()
|
err := i.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = i.Write(testutil.MockMetrics())
|
err = i.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
85
outputs/kafka/kafka.go
Normal file
85
outputs/kafka/kafka.go
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
package kafka
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/Shopify/sarama"
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Kafka struct {
|
||||||
|
// Kafka brokers to send metrics to
|
||||||
|
Brokers []string
|
||||||
|
// Kafka topic
|
||||||
|
Topic string
|
||||||
|
// Routing Key Tag
|
||||||
|
RoutingTag string `toml:"routing_tag"`
|
||||||
|
|
||||||
|
producer sarama.SyncProducer
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# URLs of kafka brokers
|
||||||
|
brokers = ["localhost:9092"]
|
||||||
|
# Kafka topic for producer messages
|
||||||
|
topic = "telegraf"
|
||||||
|
# Telegraf tag to use as a routing key
|
||||||
|
# ie, if this tag exists, it's value will be used as the routing key
|
||||||
|
routing_tag = "host"
|
||||||
|
`
|
||||||
|
|
||||||
|
func (k *Kafka) Connect() error {
|
||||||
|
producer, err := sarama.NewSyncProducer(k.Brokers, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
k.producer = producer
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *Kafka) Close() error {
|
||||||
|
return k.producer.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *Kafka) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *Kafka) Description() string {
|
||||||
|
return "Configuration for the Kafka server to send metrics to"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *Kafka) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range points {
|
||||||
|
// Combine tags from Point and BatchPoints and grab the resulting
|
||||||
|
// line-protocol output string to write to Kafka
|
||||||
|
value := p.String()
|
||||||
|
|
||||||
|
m := &sarama.ProducerMessage{
|
||||||
|
Topic: k.Topic,
|
||||||
|
Value: sarama.StringEncoder(value),
|
||||||
|
}
|
||||||
|
if h, ok := p.Tags()[k.RoutingTag]; ok {
|
||||||
|
m.Key = sarama.StringEncoder(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err := k.producer.SendMessage(m)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n",
|
||||||
|
err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("kafka", func() outputs.Output {
|
||||||
|
return &Kafka{}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -3,8 +3,7 @@ package kafka
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/serializers"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -14,11 +13,9 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
brokers := []string{testutil.GetLocalHost() + ":9092"}
|
brokers := []string{testutil.GetLocalHost() + ":9092"}
|
||||||
s, _ := serializers.NewInfluxSerializer()
|
|
||||||
k := &Kafka{
|
k := &Kafka{
|
||||||
Brokers: brokers,
|
Brokers: brokers,
|
||||||
Topic: "Test",
|
Topic: "Test",
|
||||||
serializer: s,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that we can connect to the Kafka broker
|
// Verify that we can connect to the Kafka broker
|
||||||
@@ -26,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify that we can successfully write data to the kafka broker
|
// Verify that we can successfully write data to the kafka broker
|
||||||
err = k.Write(testutil.MockMetrics())
|
err = k.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
@@ -13,12 +13,9 @@ maybe useful for users to review Amazons official documentation which is availab
|
|||||||
|
|
||||||
This plugin uses a credential chain for Authentication with the Kinesis API endpoint. In the following order the plugin
|
This plugin uses a credential chain for Authentication with the Kinesis API endpoint. In the following order the plugin
|
||||||
will attempt to authenticate.
|
will attempt to authenticate.
|
||||||
1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
|
1. [IAMS Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
|
||||||
2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
|
2. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
|
||||||
3. Shared profile from `profile` attribute
|
3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk)
|
||||||
4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
|
|
||||||
5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
|
|
||||||
6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
|
|
||||||
|
|
||||||
|
|
||||||
## Config
|
## Config
|
||||||
@@ -61,4 +58,4 @@ String is defined using the default Point.String() value and translated to []byt
|
|||||||
|
|
||||||
#### custom
|
#### custom
|
||||||
|
|
||||||
Custom is a string defined by a number of values in the FormatMetric() function.
|
Custom is a string defined by a number of values in the FormatMetric() function.
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package kinesis
|
package kinesis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
@@ -8,22 +9,18 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/kinesis"
|
"github.com/aws/aws-sdk-go/service/kinesis"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
internalaws "github.com/influxdata/telegraf/internal/config/aws"
|
"github.com/influxdb/telegraf/outputs"
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type KinesisOutput struct {
|
type KinesisOutput struct {
|
||||||
Region string `toml:"region"`
|
Region string `toml:"region"`
|
||||||
AccessKey string `toml:"access_key"`
|
|
||||||
SecretKey string `toml:"secret_key"`
|
|
||||||
RoleARN string `toml:"role_arn"`
|
|
||||||
Profile string `toml:"profile"`
|
|
||||||
Filename string `toml:"shared_credential_file"`
|
|
||||||
Token string `toml:"token"`
|
|
||||||
|
|
||||||
StreamName string `toml:"streamname"`
|
StreamName string `toml:"streamname"`
|
||||||
PartitionKey string `toml:"partitionkey"`
|
PartitionKey string `toml:"partitionkey"`
|
||||||
Format string `toml:"format"`
|
Format string `toml:"format"`
|
||||||
@@ -32,32 +29,16 @@ type KinesisOutput struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## Amazon REGION of kinesis endpoint.
|
# Amazon REGION of kinesis endpoint.
|
||||||
region = "ap-southeast-2"
|
region = "ap-southeast-2"
|
||||||
|
# Kinesis StreamName must exist prior to starting telegraf.
|
||||||
## Amazon Credentials
|
|
||||||
## Credentials are loaded in the following order
|
|
||||||
## 1) Assumed credentials via STS if role_arn is specified
|
|
||||||
## 2) explicit credentials from 'access_key' and 'secret_key'
|
|
||||||
## 3) shared profile from 'profile'
|
|
||||||
## 4) environment variables
|
|
||||||
## 5) shared credentials file
|
|
||||||
## 6) EC2 Instance Profile
|
|
||||||
#access_key = ""
|
|
||||||
#secret_key = ""
|
|
||||||
#token = ""
|
|
||||||
#role_arn = ""
|
|
||||||
#profile = ""
|
|
||||||
#shared_credential_file = ""
|
|
||||||
|
|
||||||
## Kinesis StreamName must exist prior to starting telegraf.
|
|
||||||
streamname = "StreamName"
|
streamname = "StreamName"
|
||||||
## PartitionKey as used for sharding data.
|
# PartitionKey as used for sharding data.
|
||||||
partitionkey = "PartitionKey"
|
partitionkey = "PartitionKey"
|
||||||
## format of the Data payload in the kinesis PutRecord, supported
|
# format of the Data payload in the kinesis PutRecord, supported
|
||||||
## String and Custom.
|
# String and Custom.
|
||||||
format = "string"
|
format = "string"
|
||||||
## debug will show upstream aws messages.
|
# debug will show upstream aws messages.
|
||||||
debug = false
|
debug = false
|
||||||
`
|
`
|
||||||
|
|
||||||
@@ -83,20 +64,18 @@ func (k *KinesisOutput) Connect() error {
|
|||||||
// We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using
|
// We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using
|
||||||
// environment variables, and then Shared Credentials.
|
// environment variables, and then Shared Credentials.
|
||||||
if k.Debug {
|
if k.Debug {
|
||||||
log.Printf("E! kinesis: Establishing a connection to Kinesis in %+v", k.Region)
|
log.Printf("kinesis: Establishing a connection to Kinesis in %+v", k.Region)
|
||||||
}
|
}
|
||||||
|
Config := &aws.Config{
|
||||||
credentialConfig := &internalaws.CredentialConfig{
|
Region: aws.String(k.Region),
|
||||||
Region: k.Region,
|
Credentials: credentials.NewChainCredentials(
|
||||||
AccessKey: k.AccessKey,
|
[]credentials.Provider{
|
||||||
SecretKey: k.SecretKey,
|
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
|
||||||
RoleARN: k.RoleARN,
|
&credentials.EnvProvider{},
|
||||||
Profile: k.Profile,
|
&credentials.SharedCredentialsProvider{},
|
||||||
Filename: k.Filename,
|
}),
|
||||||
Token: k.Token,
|
|
||||||
}
|
}
|
||||||
configProvider := credentialConfig.Credentials()
|
svc := kinesis.New(session.New(Config))
|
||||||
svc := kinesis.New(configProvider)
|
|
||||||
|
|
||||||
KinesisParams := &kinesis.ListStreamsInput{
|
KinesisParams := &kinesis.ListStreamsInput{
|
||||||
Limit: aws.Int64(100),
|
Limit: aws.Int64(100),
|
||||||
@@ -105,27 +84,27 @@ func (k *KinesisOutput) Connect() error {
|
|||||||
resp, err := svc.ListStreams(KinesisParams)
|
resp, err := svc.ListStreams(KinesisParams)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("E! kinesis: Error in ListSteams API call : %+v \n", err)
|
log.Printf("kinesis: Error in ListSteams API call : %+v \n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if checkstream(resp.StreamNames, k.StreamName) {
|
if checkstream(resp.StreamNames, k.StreamName) {
|
||||||
if k.Debug {
|
if k.Debug {
|
||||||
log.Printf("E! kinesis: Stream Exists")
|
log.Printf("kinesis: Stream Exists")
|
||||||
}
|
}
|
||||||
k.svc = svc
|
k.svc = svc
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
log.Printf("E! kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
|
log.Printf("kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (k *KinesisOutput) Close() error {
|
func (k *KinesisOutput) Close() error {
|
||||||
return nil
|
return errors.New("Error")
|
||||||
}
|
}
|
||||||
|
|
||||||
func FormatMetric(k *KinesisOutput, point telegraf.Metric) (string, error) {
|
func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) {
|
||||||
if k.Format == "string" {
|
if k.Format == "string" {
|
||||||
return point.String(), nil
|
return point.String(), nil
|
||||||
} else {
|
} else {
|
||||||
@@ -147,29 +126,29 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du
|
|||||||
if k.Debug {
|
if k.Debug {
|
||||||
resp, err := k.svc.PutRecords(payload)
|
resp, err := k.svc.PutRecords(payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
|
log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
|
||||||
}
|
}
|
||||||
log.Printf("E! %+v \n", resp)
|
log.Printf("%+v \n", resp)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
_, err := k.svc.PutRecords(payload)
|
_, err := k.svc.PutRecords(payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error())
|
log.Printf("kinesis: Unable to write to Kinesis : %+v \n", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return time.Since(start)
|
return time.Since(start)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
|
func (k *KinesisOutput) Write(points []*client.Point) error {
|
||||||
var sz uint32 = 0
|
var sz uint32 = 0
|
||||||
|
|
||||||
if len(metrics) == 0 {
|
if len(points) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
r := []*kinesis.PutRecordsRequestEntry{}
|
r := []*kinesis.PutRecordsRequestEntry{}
|
||||||
|
|
||||||
for _, p := range metrics {
|
for _, p := range points {
|
||||||
atomic.AddUint32(&sz, 1)
|
atomic.AddUint32(&sz, 1)
|
||||||
|
|
||||||
metric, _ := FormatMetric(k, p)
|
metric, _ := FormatMetric(k, p)
|
||||||
@@ -182,7 +161,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
|
|||||||
if sz == 500 {
|
if sz == 500 {
|
||||||
// Max Messages Per PutRecordRequest is 500
|
// Max Messages Per PutRecordRequest is 500
|
||||||
elapsed := writekinesis(k, r)
|
elapsed := writekinesis(k, r)
|
||||||
log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
|
log.Printf("Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
|
||||||
atomic.StoreUint32(&sz, 0)
|
atomic.StoreUint32(&sz, 0)
|
||||||
r = nil
|
r = nil
|
||||||
}
|
}
|
||||||
@@ -194,7 +173,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
outputs.Add("kinesis", func() telegraf.Output {
|
outputs.Add("kinesis", func() outputs.Output {
|
||||||
return &KinesisOutput{}
|
return &KinesisOutput{}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
package kinesis
|
package kinesis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
@@ -15,7 +15,7 @@ func TestFormatMetric(t *testing.T) {
|
|||||||
Format: "string",
|
Format: "string",
|
||||||
}
|
}
|
||||||
|
|
||||||
p := testutil.MockMetrics()[0]
|
p := testutil.MockBatchPoints().Points()[0]
|
||||||
|
|
||||||
valid_string := "test1,tag1=value1 value=1 1257894000000000000"
|
valid_string := "test1,tag1=value1 value=1 1257894000000000000"
|
||||||
func_string, err := FormatMetric(k, p)
|
func_string, err := FormatMetric(k, p)
|
||||||
175
outputs/librato/librato.go
Normal file
175
outputs/librato/librato.go
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
package librato
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/internal"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Librato struct {
|
||||||
|
ApiUser string
|
||||||
|
ApiToken string
|
||||||
|
SourceTag string
|
||||||
|
Timeout internal.Duration
|
||||||
|
|
||||||
|
apiUrl string
|
||||||
|
client *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# Librator API Docs
|
||||||
|
# http://dev.librato.com/v1/metrics-authentication
|
||||||
|
|
||||||
|
# Librato API user
|
||||||
|
api_user = "telegraf@influxdb.com" # required.
|
||||||
|
|
||||||
|
# Librato API token
|
||||||
|
api_token = "my-secret-token" # required.
|
||||||
|
|
||||||
|
# Tag Field to populate source attribute (optional)
|
||||||
|
# This is typically the _hostname_ from which the metric was obtained.
|
||||||
|
source_tag = "hostname"
|
||||||
|
|
||||||
|
# Connection timeout.
|
||||||
|
# timeout = "5s"
|
||||||
|
`
|
||||||
|
|
||||||
|
type Metrics struct {
|
||||||
|
Gauges []*Gauge `json:"gauges"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Gauge struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value float64 `json:"value"`
|
||||||
|
Source string `json:"source"`
|
||||||
|
MeasureTime int64 `json:"measure_time"`
|
||||||
|
}
|
||||||
|
|
||||||
|
const librato_api = "https://metrics-api.librato.com/v1/metrics"
|
||||||
|
|
||||||
|
func NewLibrato(apiUrl string) *Librato {
|
||||||
|
return &Librato{
|
||||||
|
apiUrl: apiUrl,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) Connect() error {
|
||||||
|
if l.ApiUser == "" || l.ApiToken == "" {
|
||||||
|
return fmt.Errorf("api_user and api_token are required fields for librato output")
|
||||||
|
}
|
||||||
|
l.client = &http.Client{
|
||||||
|
Timeout: l.Timeout.Duration,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
metrics := Metrics{}
|
||||||
|
tempGauges := []*Gauge{}
|
||||||
|
metricCounter := 0
|
||||||
|
|
||||||
|
for _, pt := range points {
|
||||||
|
if gauges, err := l.buildGauges(pt); err == nil {
|
||||||
|
for _, gauge := range gauges {
|
||||||
|
tempGauges = append(tempGauges, gauge)
|
||||||
|
metricCounter++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics.Gauges = make([]*Gauge, metricCounter)
|
||||||
|
copy(metrics.Gauges, tempGauges[0:])
|
||||||
|
metricsBytes, err := json.Marshal(metrics)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
|
||||||
|
}
|
||||||
|
req.Header.Add("Content-Type", "application/json")
|
||||||
|
req.SetBasicAuth(l.ApiUser, l.ApiToken)
|
||||||
|
|
||||||
|
resp, err := l.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) Description() string {
|
||||||
|
return "Configuration for Librato API to send metrics to."
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) {
|
||||||
|
gauges := []*Gauge{}
|
||||||
|
for fieldName, value := range pt.Fields() {
|
||||||
|
gauge := &Gauge{
|
||||||
|
Name: pt.Name() + "_" + fieldName,
|
||||||
|
MeasureTime: pt.Time().Unix(),
|
||||||
|
}
|
||||||
|
if err := gauge.setValue(value); err != nil {
|
||||||
|
return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
|
||||||
|
err.Error())
|
||||||
|
}
|
||||||
|
if l.SourceTag != "" {
|
||||||
|
if source, ok := pt.Tags()[l.SourceTag]; ok {
|
||||||
|
gauge.Source = source
|
||||||
|
} else {
|
||||||
|
return gauges,
|
||||||
|
fmt.Errorf("undeterminable Source type from Field, %s\n",
|
||||||
|
l.SourceTag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return gauges, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *Gauge) setValue(v interface{}) error {
|
||||||
|
switch d := v.(type) {
|
||||||
|
case int:
|
||||||
|
g.Value = float64(int(d))
|
||||||
|
case int32:
|
||||||
|
g.Value = float64(int32(d))
|
||||||
|
case int64:
|
||||||
|
g.Value = float64(int64(d))
|
||||||
|
case float32:
|
||||||
|
g.Value = float64(d)
|
||||||
|
case float64:
|
||||||
|
g.Value = float64(d)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("undeterminable type %+v", d)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Librato) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("librato", func() outputs.Output {
|
||||||
|
return NewLibrato(librato_api)
|
||||||
|
})
|
||||||
|
}
|
||||||
212
outputs/librato/librato_test.go
Normal file
212
outputs/librato/librato_test.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
package librato
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
fakeUrl = "http://test.librato.com"
|
||||||
|
fakeUser = "telegraf@influxdb.com"
|
||||||
|
fakeToken = "123456"
|
||||||
|
)
|
||||||
|
|
||||||
|
func fakeLibrato() *Librato {
|
||||||
|
l := NewLibrato(fakeUrl)
|
||||||
|
l.ApiUser = fakeUser
|
||||||
|
l.ApiToken = fakeToken
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUriOverride(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
l := NewLibrato(ts.URL)
|
||||||
|
l.ApiUser = "telegraf@influxdb.com"
|
||||||
|
l.ApiToken = "123456"
|
||||||
|
err := l.Connect()
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = l.Write(testutil.MockBatchPoints().Points())
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBadStatusCode(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusServiceUnavailable)
|
||||||
|
json.NewEncoder(w).Encode(`{
|
||||||
|
"errors": {
|
||||||
|
"system": [
|
||||||
|
"The API is currently down for maintenance. It'll be back shortly."
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
l := NewLibrato(ts.URL)
|
||||||
|
l.ApiUser = "telegraf@influxdb.com"
|
||||||
|
l.ApiToken = "123456"
|
||||||
|
err := l.Connect()
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = l.Write(testutil.MockBatchPoints().Points())
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("error expected but none returned")
|
||||||
|
} else {
|
||||||
|
require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildGauge(t *testing.T) {
|
||||||
|
var gaugeTests = []struct {
|
||||||
|
ptIn *client.Point
|
||||||
|
outGauge *Gauge
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
testutil.TestPoint(0.0, "test1"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test1",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 0.0,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint(1.0, "test2"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test2",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 1.0,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint(10, "test3"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test3",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 10.0,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint(int32(112345), "test4"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test4",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 112345.0,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint(int64(112345), "test5"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test5",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 112345.0,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint(float32(11234.5), "test6"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test6",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 11234.5,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
testutil.TestPoint("11234.5", "test7"),
|
||||||
|
&Gauge{
|
||||||
|
Name: "test7",
|
||||||
|
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 11234.5,
|
||||||
|
},
|
||||||
|
fmt.Errorf("unable to extract value from Fields, undeterminable type"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
l := NewLibrato(fakeUrl)
|
||||||
|
for _, gt := range gaugeTests {
|
||||||
|
gauge, err := l.buildGauge(gt.ptIn)
|
||||||
|
if err != nil && gt.err == nil {
|
||||||
|
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
|
||||||
|
}
|
||||||
|
if gt.err != nil && err == nil {
|
||||||
|
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
|
||||||
|
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildGaugeWithSource(t *testing.T) {
|
||||||
|
pt1, _ := client.NewPoint(
|
||||||
|
"test1",
|
||||||
|
map[string]string{"hostname": "192.168.0.1"},
|
||||||
|
map[string]interface{}{"value": 0.0},
|
||||||
|
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||||
|
)
|
||||||
|
pt2, _ := client.NewPoint(
|
||||||
|
"test2",
|
||||||
|
map[string]string{"hostnam": "192.168.0.1"},
|
||||||
|
map[string]interface{}{"value": 1.0},
|
||||||
|
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
|
||||||
|
)
|
||||||
|
var gaugeTests = []struct {
|
||||||
|
ptIn *client.Point
|
||||||
|
outGauge *Gauge
|
||||||
|
err error
|
||||||
|
}{
|
||||||
|
|
||||||
|
{
|
||||||
|
pt1,
|
||||||
|
&Gauge{
|
||||||
|
Name: "test1",
|
||||||
|
MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 0.0,
|
||||||
|
Source: "192.168.0.1",
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
pt2,
|
||||||
|
&Gauge{
|
||||||
|
Name: "test2",
|
||||||
|
MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
|
||||||
|
Value: 1.0,
|
||||||
|
},
|
||||||
|
fmt.Errorf("undeterminable Source type from Field, hostname"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
l := NewLibrato(fakeUrl)
|
||||||
|
l.SourceTag = "hostname"
|
||||||
|
for _, gt := range gaugeTests {
|
||||||
|
gauge, err := l.buildGauge(gt.ptIn)
|
||||||
|
if err != nil && gt.err == nil {
|
||||||
|
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
|
||||||
|
}
|
||||||
|
if gt.err != nil && err == nil {
|
||||||
|
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
|
||||||
|
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
190
outputs/mqtt/mqtt.go
Normal file
190
outputs/mqtt/mqtt.go
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
package mqtt
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/internal"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const MaxClientIdLen = 8
|
||||||
|
const MaxRetryCount = 3
|
||||||
|
const ClientIdPrefix = "telegraf"
|
||||||
|
|
||||||
|
type MQTT struct {
|
||||||
|
Servers []string `toml:"servers"`
|
||||||
|
Username string
|
||||||
|
Password string
|
||||||
|
Database string
|
||||||
|
Timeout internal.Duration
|
||||||
|
TopicPrefix string
|
||||||
|
|
||||||
|
Client *paho.Client
|
||||||
|
Opts *paho.ClientOptions
|
||||||
|
sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
servers = ["localhost:1883"] # required.
|
||||||
|
|
||||||
|
# MQTT outputs send metrics to this topic format
|
||||||
|
# "<topic_prefix>/host/<hostname>/<pluginname>/"
|
||||||
|
# ex: prefix/host/web01.example.com/mem/available
|
||||||
|
# topic_prefix = "prefix"
|
||||||
|
|
||||||
|
# username and password to connect MQTT server.
|
||||||
|
# username = "telegraf"
|
||||||
|
# password = "metricsmetricsmetricsmetrics"
|
||||||
|
`
|
||||||
|
|
||||||
|
func (m *MQTT) Connect() error {
|
||||||
|
var err error
|
||||||
|
m.Lock()
|
||||||
|
defer m.Unlock()
|
||||||
|
|
||||||
|
m.Opts, err = m.CreateOpts()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Client = paho.NewClient(m.Opts)
|
||||||
|
if token := m.Client.Connect(); token.Wait() && token.Error() != nil {
|
||||||
|
return token.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) Close() error {
|
||||||
|
if m.Client.IsConnected() {
|
||||||
|
m.Client.Disconnect(20)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) Description() string {
|
||||||
|
return "Configuration for MQTT server to send metrics to"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) Write(points []*client.Point) error {
|
||||||
|
m.Lock()
|
||||||
|
defer m.Unlock()
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
hostname, ok := points[0].Tags()["host"]
|
||||||
|
if !ok {
|
||||||
|
hostname = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range points {
|
||||||
|
var t []string
|
||||||
|
if m.TopicPrefix != "" {
|
||||||
|
t = append(t, m.TopicPrefix)
|
||||||
|
}
|
||||||
|
tm := strings.Split(p.Name(), "_")
|
||||||
|
if len(tm) < 2 {
|
||||||
|
tm = []string{p.Name(), "stat"}
|
||||||
|
}
|
||||||
|
|
||||||
|
t = append(t, "host", hostname, tm[0], tm[1])
|
||||||
|
topic := strings.Join(t, "/")
|
||||||
|
|
||||||
|
value := p.String()
|
||||||
|
err := m.publish(topic, value)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not write to MQTT server, %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) publish(topic, body string) error {
|
||||||
|
token := m.Client.Publish(topic, 0, false, body)
|
||||||
|
token.Wait()
|
||||||
|
if token.Error() != nil {
|
||||||
|
return token.Error()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MQTT) CreateOpts() (*paho.ClientOptions, error) {
|
||||||
|
opts := paho.NewClientOptions()
|
||||||
|
|
||||||
|
clientId := getRandomClientId()
|
||||||
|
opts.SetClientID(clientId)
|
||||||
|
|
||||||
|
TLSConfig := &tls.Config{InsecureSkipVerify: false}
|
||||||
|
ca := "" // TODO
|
||||||
|
scheme := "tcp"
|
||||||
|
if ca != "" {
|
||||||
|
scheme = "ssl"
|
||||||
|
certPool, err := getCertPool(ca)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
TLSConfig.RootCAs = certPool
|
||||||
|
}
|
||||||
|
TLSConfig.InsecureSkipVerify = true // TODO
|
||||||
|
opts.SetTLSConfig(TLSConfig)
|
||||||
|
|
||||||
|
user := m.Username
|
||||||
|
if user == "" {
|
||||||
|
opts.SetUsername(user)
|
||||||
|
}
|
||||||
|
password := m.Password
|
||||||
|
if password != "" {
|
||||||
|
opts.SetPassword(password)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(m.Servers) == 0 {
|
||||||
|
return opts, fmt.Errorf("could not get host infomations")
|
||||||
|
}
|
||||||
|
for _, host := range m.Servers {
|
||||||
|
server := fmt.Sprintf("%s://%s", scheme, host)
|
||||||
|
|
||||||
|
opts.AddBroker(server)
|
||||||
|
}
|
||||||
|
opts.SetAutoReconnect(true)
|
||||||
|
return opts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRandomClientId() string {
|
||||||
|
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||||
|
var bytes = make([]byte, MaxClientIdLen)
|
||||||
|
rand.Read(bytes)
|
||||||
|
for i, b := range bytes {
|
||||||
|
bytes[i] = alphanum[b%byte(len(alphanum))]
|
||||||
|
}
|
||||||
|
return ClientIdPrefix + "-" + string(bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCertPool(pemPath string) (*x509.CertPool, error) {
|
||||||
|
certs := x509.NewCertPool()
|
||||||
|
|
||||||
|
pemData, err := ioutil.ReadFile(pemPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
certs.AppendCertsFromPEM(pemData)
|
||||||
|
return certs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("mqtt", func() outputs.Output {
|
||||||
|
return &MQTT{}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -3,9 +3,7 @@ package mqtt
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/serializers"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -15,10 +13,8 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var url = testutil.GetLocalHost() + ":1883"
|
var url = testutil.GetLocalHost() + ":1883"
|
||||||
s, _ := serializers.NewInfluxSerializer()
|
|
||||||
m := &MQTT{
|
m := &MQTT{
|
||||||
Servers: []string{url},
|
Servers: []string{url},
|
||||||
serializer: s,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that we can connect to the MQTT broker
|
// Verify that we can connect to the MQTT broker
|
||||||
@@ -26,6 +22,6 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify that we can successfully write data to the mqtt broker
|
// Verify that we can successfully write data to the mqtt broker
|
||||||
err = m.Write(testutil.MockMetrics())
|
err = m.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
71
outputs/nsq/nsq.go
Normal file
71
outputs/nsq/nsq.go
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package nsq
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
"github.com/nsqio/go-nsq"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NSQ struct {
|
||||||
|
Server string
|
||||||
|
Topic string
|
||||||
|
producer *nsq.Producer
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# Location of nsqd instance listening on TCP
|
||||||
|
server = "localhost:4150"
|
||||||
|
# NSQ topic for producer messages
|
||||||
|
topic = "telegraf"
|
||||||
|
`
|
||||||
|
|
||||||
|
func (n *NSQ) Connect() error {
|
||||||
|
config := nsq.NewConfig()
|
||||||
|
producer, err := nsq.NewProducer(n.Server, config)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
n.producer = producer
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NSQ) Close() error {
|
||||||
|
n.producer.Stop()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NSQ) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NSQ) Description() string {
|
||||||
|
return "Send telegraf measurements to NSQD"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NSQ) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range points {
|
||||||
|
// Combine tags from Point and BatchPoints and grab the resulting
|
||||||
|
// line-protocol output string to write to NSQ
|
||||||
|
value := p.String()
|
||||||
|
|
||||||
|
err := n.producer.Publish(n.Topic, []byte(value))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("FAILED to send NSQD message: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("nsq", func() outputs.Output {
|
||||||
|
return &NSQ{}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -3,8 +3,7 @@ package nsq
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/plugins/serializers"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -14,11 +13,9 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
server := []string{testutil.GetLocalHost() + ":4150"}
|
server := []string{testutil.GetLocalHost() + ":4150"}
|
||||||
s, _ := serializers.NewInfluxSerializer()
|
|
||||||
n := &NSQ{
|
n := &NSQ{
|
||||||
Server: server[0],
|
Server: server[0],
|
||||||
Topic: "telegraf",
|
Topic: "telegraf",
|
||||||
serializer: s,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that we can connect to the NSQ daemon
|
// Verify that we can connect to the NSQ daemon
|
||||||
@@ -26,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify that we can successfully write data to the NSQ daemon
|
// Verify that we can successfully write data to the NSQ daemon
|
||||||
err = n.Write(testutil.MockMetrics())
|
err = n.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
@@ -1,12 +1,6 @@
|
|||||||
# OpenTSDB Output Plugin
|
# OpenTSDB Output Plugin
|
||||||
|
|
||||||
This plugin writes to an OpenTSDB instance using either the "telnet" or Http mode.
|
This plugin writes to a OpenTSDB instance using the "telnet" mode
|
||||||
|
|
||||||
Using the Http API is the recommended way of writing metrics since OpenTSDB 2.0
|
|
||||||
To use Http mode, set useHttp to true in config. You can also control how many
|
|
||||||
metrics is sent in each http request by setting batchSize in config.
|
|
||||||
|
|
||||||
See http://opentsdb.net/docs/build/html/api_http/put.html for details.
|
|
||||||
|
|
||||||
## Transfer "Protocol" in the telnet mode
|
## Transfer "Protocol" in the telnet mode
|
||||||
|
|
||||||
@@ -16,14 +10,14 @@ The expected input from OpenTSDB is specified in the following way:
|
|||||||
put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
||||||
```
|
```
|
||||||
|
|
||||||
The telegraf output plugin adds an optional prefix to the metric keys so
|
The telegraf output plugin adds an optional prefix to the metric keys so
|
||||||
that a subamount can be selected.
|
that a subamount can be selected.
|
||||||
|
|
||||||
```
|
```
|
||||||
put <[prefix.]metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
put <[prefix.]metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
|
||||||
```
|
```
|
||||||
|
|
||||||
### Example
|
### Example
|
||||||
|
|
||||||
```
|
```
|
||||||
put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green
|
put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green
|
||||||
@@ -44,12 +38,12 @@ put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice ho
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
The OpenTSDB telnet interface can be simulated with this reader:
|
The OpenTSDB interface can be simulated with this reader:
|
||||||
|
|
||||||
```
|
```
|
||||||
// opentsdb_telnet_mode_mock.go
|
// opentsdb_telnet_mode_mock.go
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -81,4 +75,4 @@ func main() {
|
|||||||
|
|
||||||
## Allowed values for metrics
|
## Allowed values for metrics
|
||||||
|
|
||||||
OpenTSDB allows `integers` and `floats` as input values
|
OpenTSDB allows `integers` and `floats` as input values
|
||||||
168
outputs/opentsdb/opentsdb.go
Normal file
168
outputs/opentsdb/opentsdb.go
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
package opentsdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OpenTSDB struct {
|
||||||
|
Prefix string
|
||||||
|
|
||||||
|
Host string
|
||||||
|
Port int
|
||||||
|
|
||||||
|
Debug bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
# prefix for metrics keys
|
||||||
|
prefix = "my.specific.prefix."
|
||||||
|
|
||||||
|
## Telnet Mode ##
|
||||||
|
# DNS name of the OpenTSDB server in telnet mode
|
||||||
|
host = "opentsdb.example.com"
|
||||||
|
|
||||||
|
# Port of the OpenTSDB server in telnet mode
|
||||||
|
port = 4242
|
||||||
|
|
||||||
|
# Debug true - Prints OpenTSDB communication
|
||||||
|
debug = false
|
||||||
|
`
|
||||||
|
|
||||||
|
type MetricLine struct {
|
||||||
|
Metric string
|
||||||
|
Timestamp int64
|
||||||
|
Value string
|
||||||
|
Tags string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *OpenTSDB) Connect() error {
|
||||||
|
// Test Connection to OpenTSDB Server
|
||||||
|
uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
|
||||||
|
tcpAddr, err := net.ResolveTCPAddr("tcp", uri)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("OpenTSDB: TCP address cannot be resolved")
|
||||||
|
}
|
||||||
|
connection, err := net.DialTCP("tcp", nil, tcpAddr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("OpenTSDB: Telnet connect fail")
|
||||||
|
}
|
||||||
|
defer connection.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *OpenTSDB) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
// Send Data with telnet / socket communication
|
||||||
|
uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
|
||||||
|
tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)
|
||||||
|
connection, err := net.DialTCP("tcp", nil, tcpAddr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("OpenTSDB: Telnet connect fail")
|
||||||
|
}
|
||||||
|
defer connection.Close()
|
||||||
|
|
||||||
|
for _, pt := range points {
|
||||||
|
for _, metric := range buildMetrics(pt, now, o.Prefix) {
|
||||||
|
messageLine := fmt.Sprintf("put %s %v %s %s\n",
|
||||||
|
metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
|
||||||
|
if o.Debug {
|
||||||
|
fmt.Print(messageLine)
|
||||||
|
}
|
||||||
|
_, err := connection.Write([]byte(messageLine))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildTags(ptTags map[string]string) []string {
|
||||||
|
tags := make([]string, len(ptTags))
|
||||||
|
index := 0
|
||||||
|
for k, v := range ptTags {
|
||||||
|
tags[index] = fmt.Sprintf("%s=%s", k, v)
|
||||||
|
index += 1
|
||||||
|
}
|
||||||
|
sort.Strings(tags)
|
||||||
|
return tags
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine {
|
||||||
|
ret := []*MetricLine{}
|
||||||
|
for fieldName, value := range pt.Fields() {
|
||||||
|
metric := &MetricLine{
|
||||||
|
Metric: fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName),
|
||||||
|
Timestamp: now.Unix(),
|
||||||
|
}
|
||||||
|
|
||||||
|
metricValue, buildError := buildValue(value)
|
||||||
|
if buildError != nil {
|
||||||
|
fmt.Printf("OpenTSDB: %s\n", buildError.Error())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
metric.Value = metricValue
|
||||||
|
tagsSlice := buildTags(pt.Tags())
|
||||||
|
metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
|
||||||
|
ret = append(ret, metric)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildValue(v interface{}) (string, error) {
|
||||||
|
var retv string
|
||||||
|
switch p := v.(type) {
|
||||||
|
case int64:
|
||||||
|
retv = IntToString(int64(p))
|
||||||
|
case uint64:
|
||||||
|
retv = UIntToString(uint64(p))
|
||||||
|
case float64:
|
||||||
|
retv = FloatToString(float64(p))
|
||||||
|
default:
|
||||||
|
return retv, fmt.Errorf("unexpected type %T with value %v for OpenTSDB", v, v)
|
||||||
|
}
|
||||||
|
return retv, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IntToString(input_num int64) string {
|
||||||
|
return strconv.FormatInt(input_num, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func UIntToString(input_num uint64) string {
|
||||||
|
return strconv.FormatUint(input_num, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func FloatToString(input_num float64) string {
|
||||||
|
return strconv.FormatFloat(input_num, 'f', 6, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *OpenTSDB) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *OpenTSDB) Description() string {
|
||||||
|
return "Configuration for OpenTSDB server to send metrics to"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *OpenTSDB) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("opentsdb", func() outputs.Output {
|
||||||
|
return &OpenTSDB{}
|
||||||
|
})
|
||||||
|
}
|
||||||
71
outputs/opentsdb/opentsdb_test.go
Normal file
71
outputs/opentsdb/opentsdb_test.go
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package opentsdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBuildTagsTelnet(t *testing.T) {
|
||||||
|
var tagtests = []struct {
|
||||||
|
ptIn map[string]string
|
||||||
|
outTags []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
map[string]string{"one": "two", "three": "four"},
|
||||||
|
[]string{"one=two", "three=four"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]string{"aaa": "bbb"},
|
||||||
|
[]string{"aaa=bbb"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]string{"one": "two", "aaa": "bbb"},
|
||||||
|
[]string{"aaa=bbb", "one=two"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
map[string]string{},
|
||||||
|
[]string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tagtests {
|
||||||
|
tags := buildTags(tt.ptIn)
|
||||||
|
if !reflect.DeepEqual(tags, tt.outTags) {
|
||||||
|
t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWrite is an integration test: it connects to a live OpenTSDB
// instance on port 4242 of the docker test host and writes batches of
// points covering float, int, uint and string field types.
// Skipped in short mode since it needs a running server.
func TestWrite(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	o := &OpenTSDB{
		Host:   testutil.GetLocalHost(),
		Port:   4242,
		Prefix: "prefix.test.",
	}

	// Verify that we can connect to the OpenTSDB instance
	err := o.Connect()
	require.NoError(t, err)

	// Verify that we can successfully write data to OpenTSDB
	err = o.Write(testutil.MockBatchPoints().Points())
	require.NoError(t, err)

	// Verify positive and negative test cases of writing data
	bp := testutil.MockBatchPoints()
	bp.AddPoint(testutil.TestPoint(float64(1.0), "justametric.float"))
	bp.AddPoint(testutil.TestPoint(int64(123456789), "justametric.int"))
	bp.AddPoint(testutil.TestPoint(uint64(123456789012345), "justametric.uint"))
	bp.AddPoint(testutil.TestPoint("Lorem Ipsum", "justametric.string"))
	bp.AddPoint(testutil.TestPoint(float64(42.0), "justametric.anotherfloat"))

	err = o.Write(bp.Points())
	require.NoError(t, err)
}
|
||||||
125
outputs/prometheus_client/prometheus_client.go
Normal file
125
outputs/prometheus_client/prometheus_client.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
package prometheus_client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PrometheusClient is a service output that exposes collected points on an
// HTTP endpoint in the Prometheus exposition format.
type PrometheusClient struct {
	// Listen is the "host:port" the /metrics endpoint binds to;
	// Start defaults it to "localhost:9126" when empty.
	Listen string

	// metrics caches one registered collector per measurement name.
	metrics map[string]*prometheus.UntypedVec
}

var sampleConfig = `
# Address to listen on
# listen = ":9126"
`
|
||||||
|
|
||||||
|
func (p *PrometheusClient) Start() error {
|
||||||
|
if p.Listen == "" {
|
||||||
|
p.Listen = "localhost:9126"
|
||||||
|
}
|
||||||
|
|
||||||
|
http.Handle("/metrics", prometheus.Handler())
|
||||||
|
server := &http.Server{
|
||||||
|
Addr: p.Listen,
|
||||||
|
}
|
||||||
|
|
||||||
|
p.metrics = make(map[string]*prometheus.UntypedVec)
|
||||||
|
go server.ListenAndServe()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop is currently a no-op; the background HTTP server keeps running.
func (p *PrometheusClient) Stop() {
	// TODO: Use a listener for http.Server that counts active connections
	// that can be stopped and closed gracefully
}

// Connect is a no-op; this service output listens rather than dials.
func (p *PrometheusClient) Connect() error {
	// This service output does not need to make any further connections
	return nil
}

// Close is a no-op; there are no outbound connections to tear down.
func (p *PrometheusClient) Close() error {
	// This service output does not need to close any of its connections
	return nil
}

// SampleConfig returns the example configuration snippet for this output.
func (p *PrometheusClient) SampleConfig() string {
	return sampleConfig
}

// Description returns a one-line summary shown in generated configs.
func (p *PrometheusClient) Description() string {
	return "Configuration for the Prometheus client to spawn"
}
|
||||||
|
|
||||||
|
func (p *PrometheusClient) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, point := range points {
|
||||||
|
var labels []string
|
||||||
|
key := point.Name()
|
||||||
|
|
||||||
|
for k, _ := range point.Tags() {
|
||||||
|
if len(k) > 0 {
|
||||||
|
labels = append(labels, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := p.metrics[key]; !ok {
|
||||||
|
p.metrics[key] = prometheus.NewUntypedVec(
|
||||||
|
prometheus.UntypedOpts{
|
||||||
|
Name: key,
|
||||||
|
Help: fmt.Sprintf("Telegraf collected point '%s'", key),
|
||||||
|
},
|
||||||
|
labels,
|
||||||
|
)
|
||||||
|
prometheus.MustRegister(p.metrics[key])
|
||||||
|
}
|
||||||
|
|
||||||
|
l := prometheus.Labels{}
|
||||||
|
for tk, tv := range point.Tags() {
|
||||||
|
l[tk] = tv
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range point.Fields() {
|
||||||
|
switch val := val.(type) {
|
||||||
|
default:
|
||||||
|
log.Printf("Prometheus output, unsupported type. key: %s, type: %T\n",
|
||||||
|
key, val)
|
||||||
|
case int64:
|
||||||
|
m, err := p.metrics[key].GetMetricWith(l)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR Getting metric in Prometheus output, "+
|
||||||
|
"key: %s, labels: %v,\nerr: %s\n",
|
||||||
|
key, l, err.Error())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
m.Set(float64(val))
|
||||||
|
case float64:
|
||||||
|
m, err := p.metrics[key].GetMetricWith(l)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("ERROR Getting metric in Prometheus output, "+
|
||||||
|
"key: %s, labels: %v,\nerr: %s\n",
|
||||||
|
key, l, err.Error())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
m.Set(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers this output with the registry under "prometheus_client".
func init() {
	outputs.Add("prometheus_client", func() outputs.Output {
		return &PrometheusClient{}
	})
}
|
||||||
98
outputs/prometheus_client/prometheus_client_test.go
Normal file
98
outputs/prometheus_client/prometheus_client_test.go
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
package prometheus_client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/plugins/prometheus"
|
||||||
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var pTesting *PrometheusClient
|
||||||
|
|
||||||
|
// TestPrometheusWritePointEmptyTag writes two untagged points through the
// shared client (started by init on port 9126) and scrapes them back via
// the prometheus input plugin, checking the exposed values.
// Integration test: skipped in short mode.
func TestPrometheusWritePointEmptyTag(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &prometheus.Prometheus{
		Urls: []string{"http://localhost:9126/metrics"},
	}
	tags := make(map[string]string)
	pt1, _ := client.NewPoint(
		"test_point_1",
		tags,
		map[string]interface{}{"value": 0.0})
	pt2, _ := client.NewPoint(
		"test_point_2",
		tags,
		map[string]interface{}{"value": 1.0})
	var points = []*client.Point{
		pt1,
		pt2,
	}
	require.NoError(t, pTesting.Write(points))

	expected := []struct {
		name  string
		value float64
		tags  map[string]string
	}{
		{"test_point_1", 0.0, tags},
		{"test_point_2", 1.0, tags},
	}

	var acc testutil.Accumulator

	// Scrape the endpoint and confirm both values round-tripped.
	require.NoError(t, p.Gather(&acc))
	for _, e := range expected {
		assert.NoError(t, acc.ValidateValue(e.name, e.value))
	}
}
|
||||||
|
|
||||||
|
// TestPrometheusWritePointTag is the tagged counterpart of the test above:
// two points carrying a "testtag" label are written and then scraped back,
// checking value and tag together. Integration test: skipped in short mode.
func TestPrometheusWritePointTag(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &prometheus.Prometheus{
		Urls: []string{"http://localhost:9126/metrics"},
	}
	tags := make(map[string]string)
	tags["testtag"] = "testvalue"
	pt1, _ := client.NewPoint(
		"test_point_3",
		tags,
		map[string]interface{}{"value": 0.0})
	pt2, _ := client.NewPoint(
		"test_point_4",
		tags,
		map[string]interface{}{"value": 1.0})
	var points = []*client.Point{
		pt1,
		pt2,
	}
	require.NoError(t, pTesting.Write(points))

	expected := []struct {
		name  string
		value float64
	}{
		{"test_point_3", 0.0},
		{"test_point_4", 1.0},
	}

	var acc testutil.Accumulator

	// Scrape the endpoint and confirm value plus label round-tripped.
	require.NoError(t, p.Gather(&acc))
	for _, e := range expected {
		assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags))
	}
}
|
||||||
|
|
||||||
|
// init starts one shared client on a fixed port for the integration
// tests in this file.
func init() {
	pTesting = &PrometheusClient{Listen: "localhost:9126"}
	pTesting.Start()
}
|
||||||
@@ -1,4 +1,8 @@
|
|||||||
package telegraf
|
package outputs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
)
|
||||||
|
|
||||||
type Output interface {
|
type Output interface {
|
||||||
// Connect to the Output
|
// Connect to the Output
|
||||||
@@ -10,7 +14,7 @@ type Output interface {
|
|||||||
// SampleConfig returns the default configuration of the Output
|
// SampleConfig returns the default configuration of the Output
|
||||||
SampleConfig() string
|
SampleConfig() string
|
||||||
// Write takes in group of points to be written to the Output
|
// Write takes in group of points to be written to the Output
|
||||||
Write(metrics []Metric) error
|
Write(points []*client.Point) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type ServiceOutput interface {
|
type ServiceOutput interface {
|
||||||
@@ -23,9 +27,17 @@ type ServiceOutput interface {
|
|||||||
// SampleConfig returns the default configuration of the Output
|
// SampleConfig returns the default configuration of the Output
|
||||||
SampleConfig() string
|
SampleConfig() string
|
||||||
// Write takes in group of points to be written to the Output
|
// Write takes in group of points to be written to the Output
|
||||||
Write(metrics []Metric) error
|
Write(points []*client.Point) error
|
||||||
// Start the "service" that will provide an Output
|
// Start the "service" that will provide an Output
|
||||||
Start() error
|
Start() error
|
||||||
// Stop the "service" that will provide an Output
|
// Stop the "service" that will provide an Output
|
||||||
Stop()
|
Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Creator constructs a new instance of a registered Output.
type Creator func() Output

// Outputs maps registry names (e.g. "opentsdb") to their constructors.
var Outputs = map[string]Creator{}

// Add registers an Output constructor under the given name; each output
// plugin calls this from its init().
func Add(name string, creator Creator) {
	Outputs[name] = creator
}
|
||||||
101
outputs/riemann/riemann.go
Normal file
101
outputs/riemann/riemann.go
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
package riemann
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/amir/raidman"
|
||||||
|
"github.com/influxdb/influxdb/client/v2"
|
||||||
|
"github.com/influxdb/telegraf/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Riemann ships collected points to a Riemann monitoring server.
type Riemann struct {
	// URL is the "host:port" of the Riemann server.
	URL string
	// Transport selects the protocol, either "tcp" or "udp".
	Transport string

	// client is established by Connect and reused by Write.
	client *raidman.Client
}

var sampleConfig = `
# URL of server
url = "localhost:5555"
# transport protocol to use either tcp or udp
transport = "tcp"
`
|
||||||
|
|
||||||
|
func (r *Riemann) Connect() error {
|
||||||
|
c, err := raidman.Dial(r.Transport, r.URL)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
r.client = c
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Riemann) Close() error {
|
||||||
|
r.client.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SampleConfig returns the example configuration snippet for this output.
func (r *Riemann) SampleConfig() string {
	return sampleConfig
}

// Description returns a one-line summary shown in generated configs.
func (r *Riemann) Description() string {
	return "Configuration for the Riemann server to send metrics to"
}
|
||||||
|
|
||||||
|
func (r *Riemann) Write(points []*client.Point) error {
|
||||||
|
if len(points) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var events []*raidman.Event
|
||||||
|
for _, p := range points {
|
||||||
|
evs := buildEvents(p)
|
||||||
|
for _, ev := range evs {
|
||||||
|
events = append(events, ev)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var senderr = r.client.SendMulti(events)
|
||||||
|
if senderr != nil {
|
||||||
|
return errors.New(fmt.Sprintf("FAILED to send riemann message: %s\n",
|
||||||
|
senderr))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildEvents(p *client.Point) []*raidman.Event {
|
||||||
|
events := []*raidman.Event{}
|
||||||
|
for fieldName, value := range p.Fields() {
|
||||||
|
host, ok := p.Tags()["host"]
|
||||||
|
if !ok {
|
||||||
|
hostname, err := os.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
host = "unknown"
|
||||||
|
} else {
|
||||||
|
host = hostname
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
event := &raidman.Event{
|
||||||
|
Host: host,
|
||||||
|
Service: p.Name() + "_" + fieldName,
|
||||||
|
Metric: value,
|
||||||
|
}
|
||||||
|
events = append(events, event)
|
||||||
|
}
|
||||||
|
|
||||||
|
return events
|
||||||
|
}
|
||||||
|
|
||||||
|
// init registers this output with the registry under the name "riemann".
func init() {
	outputs.Add("riemann", func() outputs.Output {
		return &Riemann{}
	})
}
|
||||||
@@ -3,7 +3,7 @@ package riemann
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdb/telegraf/testutil"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,6 +22,6 @@ func TestConnectAndWrite(t *testing.T) {
|
|||||||
err := r.Connect()
|
err := r.Connect()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = r.Write(testutil.MockMetrics())
|
err = r.Write(testutil.MockBatchPoints().Points())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
265
plugins/aerospike/README.md
Normal file
265
plugins/aerospike/README.md
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
## Telegraf Plugin: Aerospike
|
||||||
|
|
||||||
|
#### Plugin arguments:
|
||||||
|
- **servers** string array: List of aerospike servers to query (def: 127.0.0.1:3000)
|
||||||
|
|
||||||
|
#### Description
|
||||||
|
|
||||||
|
The aerospike plugin queries aerospike server(s) and gets node statistics. It also collects stats for
|
||||||
|
all the configured namespaces.
|
||||||
|
|
||||||
|
For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).
|
||||||
|
|
||||||
|
To simplify querying, all `-` characters in metric names have been replaced with `_`, since Aerospike reports metrics inconsistently in both forms.
|
||||||
|
|
||||||
|
# Measurements:
|
||||||
|
#### Aerospike Statistics [values]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: Integer
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- batch_index_queue
|
||||||
|
- batch_index_unused_buffers
|
||||||
|
- batch_queue
|
||||||
|
- batch_tree_count
|
||||||
|
- client_connections
|
||||||
|
- data_used_bytes_memory
|
||||||
|
- index_used_bytes_memory
|
||||||
|
- info_queue
|
||||||
|
- migrate_progress_recv
|
||||||
|
- migrate_progress_send
|
||||||
|
- migrate_rx_objs
|
||||||
|
- migrate_tx_objs
|
||||||
|
- objects
|
||||||
|
- ongoing_write_reqs
|
||||||
|
- partition_absent
|
||||||
|
- partition_actual
|
||||||
|
- partition_desync
|
||||||
|
- partition_object_count
|
||||||
|
- partition_ref_count
|
||||||
|
- partition_replica
|
||||||
|
- proxy_in_progress
|
||||||
|
- query_agg_avg_rec_count
|
||||||
|
- query_avg_rec_count
|
||||||
|
- query_lookup_avg_rec_count
|
||||||
|
- queue
|
||||||
|
- record_locks
|
||||||
|
- record_refs
|
||||||
|
- sindex_used_bytes_memory
|
||||||
|
- sindex_gc_garbage_cleaned
|
||||||
|
- system_free_mem_pct
|
||||||
|
- total_bytes_disk
|
||||||
|
- total_bytes_memory
|
||||||
|
- tree_count
|
||||||
|
- scans_active
|
||||||
|
- uptime
|
||||||
|
- used_bytes_disk
|
||||||
|
- used_bytes_memory
|
||||||
|
- cluster_size
|
||||||
|
- waiting_transactions
|
||||||
|
|
||||||
|
#### Aerospike Statistics [cumulative]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: Integer
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- batch_errors
|
||||||
|
- batch_index_complete
|
||||||
|
- batch_index_errors
|
||||||
|
- batch_index_initiate
|
||||||
|
- batch_index_timeout
|
||||||
|
- batch_initiate
|
||||||
|
- batch_timeout
|
||||||
|
- err_duplicate_proxy_request
|
||||||
|
- err_out_of_space
|
||||||
|
- err_replica_non_null_node
|
||||||
|
- err_replica_null_node
|
||||||
|
- err_rw_cant_put_unique
|
||||||
|
- err_rw_pending_limit
|
||||||
|
- err_rw_request_not_found
|
||||||
|
- err_storage_queue_full
|
||||||
|
- err_sync_copy_null_master
|
||||||
|
- err_sync_copy_null_node
|
||||||
|
- err_tsvc_requests
|
||||||
|
- err_write_fail_bin_exists
|
||||||
|
- err_write_fail_generation
|
||||||
|
- err_write_fail_generation_xdr
|
||||||
|
- err_write_fail_incompatible_type
|
||||||
|
- err_write_fail_key_exists
|
||||||
|
- err_write_fail_key_mismatch
|
||||||
|
- err_write_fail_not_found
|
||||||
|
- err_write_fail_noxdr
|
||||||
|
- err_write_fail_parameter
|
||||||
|
- err_write_fail_prole_delete
|
||||||
|
- err_write_fail_prole_generation
|
||||||
|
- err_write_fail_prole_unknown
|
||||||
|
- err_write_fail_unknown
|
||||||
|
- fabric_msgs_rcvd
|
||||||
|
- fabric_msgs_sent
|
||||||
|
- heartbeat_received_foreign
|
||||||
|
- heartbeat_received_self
|
||||||
|
- migrate_msgs_recv
|
||||||
|
- migrate_msgs_sent
|
||||||
|
- migrate_num_incoming_accepted
|
||||||
|
- migrate_num_incoming_refused
|
||||||
|
- proxy_action
|
||||||
|
- proxy_initiate
|
||||||
|
- proxy_retry
|
||||||
|
- proxy_retry_new_dest
|
||||||
|
- proxy_retry_q_full
|
||||||
|
- proxy_retry_same_dest
|
||||||
|
- proxy_unproxy
|
||||||
|
- query_abort
|
||||||
|
- query_agg
|
||||||
|
- query_agg_abort
|
||||||
|
- query_agg_err
|
||||||
|
- query_agg_success
|
||||||
|
- query_bad_records
|
||||||
|
- query_fail
|
||||||
|
- query_long_queue_full
|
||||||
|
- query_long_running
|
||||||
|
- query_lookup_abort
|
||||||
|
- query_lookup_err
|
||||||
|
- query_lookups
|
||||||
|
- query_lookup_success
|
||||||
|
- query_reqs
|
||||||
|
- query_short_queue_full
|
||||||
|
- query_short_running
|
||||||
|
- query_success
|
||||||
|
- query_tracked
|
||||||
|
- read_dup_prole
|
||||||
|
- reaped_fds
|
||||||
|
- rw_err_ack_badnode
|
||||||
|
- rw_err_ack_internal
|
||||||
|
- rw_err_ack_nomatch
|
||||||
|
- rw_err_dup_cluster_key
|
||||||
|
- rw_err_dup_internal
|
||||||
|
- rw_err_dup_send
|
||||||
|
- rw_err_write_cluster_key
|
||||||
|
- rw_err_write_internal
|
||||||
|
- rw_err_write_send
|
||||||
|
- sindex_ucgarbage_found
|
||||||
|
- sindex_gc_locktimedout
|
||||||
|
- sindex_gc_inactivity_dur
|
||||||
|
- sindex_gc_activity_dur
|
||||||
|
- sindex_gc_list_creation_time
|
||||||
|
- sindex_gc_list_deletion_time
|
||||||
|
- sindex_gc_objects_validated
|
||||||
|
- sindex_gc_garbage_found
|
||||||
|
- stat_cluster_key_err_ack_dup_trans_reenqueue
|
||||||
|
- stat_cluster_key_err_ack_rw_trans_reenqueue
|
||||||
|
- stat_cluster_key_prole_retry
|
||||||
|
- stat_cluster_key_regular_processed
|
||||||
|
- stat_cluster_key_trans_to_proxy_retry
|
||||||
|
- stat_deleted_set_object
|
||||||
|
- stat_delete_success
|
||||||
|
- stat_duplicate_operation
|
||||||
|
- stat_evicted_objects
|
||||||
|
- stat_evicted_objects_time
|
||||||
|
- stat_evicted_set_objects
|
||||||
|
- stat_expired_objects
|
||||||
|
- stat_nsup_deletes_not_shipped
|
||||||
|
- stat_proxy_errs
|
||||||
|
- stat_proxy_reqs
|
||||||
|
- stat_proxy_reqs_xdr
|
||||||
|
- stat_proxy_success
|
||||||
|
- stat_read_errs_notfound
|
||||||
|
- stat_read_errs_other
|
||||||
|
- stat_read_reqs
|
||||||
|
- stat_read_reqs_xdr
|
||||||
|
- stat_read_success
|
||||||
|
- stat_rw_timeout
|
||||||
|
- stat_slow_trans_queue_batch_pop
|
||||||
|
- stat_slow_trans_queue_pop
|
||||||
|
- stat_slow_trans_queue_push
|
||||||
|
- stat_write_errs
|
||||||
|
- stat_write_errs_notfound
|
||||||
|
- stat_write_errs_other
|
||||||
|
- stat_write_reqs
|
||||||
|
- stat_write_reqs_xdr
|
||||||
|
- stat_write_success
|
||||||
|
- stat_xdr_pipe_miss
|
||||||
|
- stat_xdr_pipe_writes
|
||||||
|
- stat_zero_bin_records
|
||||||
|
- storage_defrag_corrupt_record
|
||||||
|
- storage_defrag_wait
|
||||||
|
- transactions
|
||||||
|
- basic_scans_succeeded
|
||||||
|
- basic_scans_failed
|
||||||
|
- aggr_scans_succeeded
|
||||||
|
- aggr_scans_failed
|
||||||
|
- udf_bg_scans_succeeded
|
||||||
|
- udf_bg_scans_failed
|
||||||
|
- udf_delete_err_others
|
||||||
|
- udf_delete_reqs
|
||||||
|
- udf_delete_success
|
||||||
|
- udf_lua_errs
|
||||||
|
- udf_query_rec_reqs
|
||||||
|
- udf_read_errs_other
|
||||||
|
- udf_read_reqs
|
||||||
|
- udf_read_success
|
||||||
|
- udf_replica_writes
|
||||||
|
- udf_scan_rec_reqs
|
||||||
|
- udf_write_err_others
|
||||||
|
- udf_write_reqs
|
||||||
|
- udf_write_success
|
||||||
|
- write_master
|
||||||
|
- write_prole
|
||||||
|
|
||||||
|
#### Aerospike Statistics [percentage]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: percent (out of 100)
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- free_pct_disk
|
||||||
|
- free_pct_memory
|
||||||
|
|
||||||
|
# Measurements:
|
||||||
|
#### Aerospike Namespace Statistics [values]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: Integer
|
||||||
|
- tags: `namespace=<namespace>`
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- available_bin_names
|
||||||
|
- available_pct
|
||||||
|
- current_time
|
||||||
|
- data_used_bytes_memory
|
||||||
|
- index_used_bytes_memory
|
||||||
|
- master_objects
|
||||||
|
- max_evicted_ttl
|
||||||
|
- max_void_time
|
||||||
|
- non_expirable_objects
|
||||||
|
- objects
|
||||||
|
- prole_objects
|
||||||
|
- sindex_used_bytes_memory
|
||||||
|
- total_bytes_disk
|
||||||
|
- total_bytes_memory
|
||||||
|
- used_bytes_disk
|
||||||
|
- used_bytes_memory
|
||||||
|
|
||||||
|
#### Aerospike Namespace Statistics [cumulative]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: Integer
|
||||||
|
- tags: `namespace=<namespace>`
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- evicted_objects
|
||||||
|
- expired_objects
|
||||||
|
- set_deleted_objects
|
||||||
|
- set_evicted_objects
|
||||||
|
|
||||||
|
#### Aerospike Namespace Statistics [percentage]:
|
||||||
|
|
||||||
|
Meta:
|
||||||
|
- units: percent (out of 100)
|
||||||
|
- tags: `namespace=<namespace>`
|
||||||
|
|
||||||
|
Measurement names:
|
||||||
|
- free_pct_disk
|
||||||
|
- free_pct_memory
|
||||||
342
plugins/aerospike/aerospike.go
Normal file
342
plugins/aerospike/aerospike.go
Normal file
@@ -0,0 +1,342 @@
|
|||||||
|
package aerospike
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"github.com/influxdb/telegraf/plugins"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// MSG_HEADER_SIZE is the wire size of aerospikeMessageHeader:
	// version (1 byte) + type (1 byte) + 48-bit length (6 bytes).
	MSG_HEADER_SIZE = 8
	MSG_TYPE        = 1 // Info is 1
	MSG_VERSION     = 2
)

var (
	// Info commands sent verbatim to the server; the trailing '\n'
	// terminates the request.
	STATISTICS_COMMAND = []byte("statistics\n")
	NAMESPACES_COMMAND = []byte("namespaces\n")
)
|
||||||
|
|
||||||
|
// aerospikeMessageHeader is the fixed 8-byte wire header of an info
// message: version, type, and a 48-bit big-endian payload length.
type aerospikeMessageHeader struct {
	Version uint8
	Type    uint8
	DataLen [6]byte
}

// aerospikeMessage is a complete info message: header plus payload bytes.
type aerospikeMessage struct {
	aerospikeMessageHeader
	Data []byte
}

// Serialize renders the message in wire order (header then payload),
// refreshing DataLen from the current payload length first.
// Writes into a bytes.Buffer cannot fail, so the errors are ignored.
// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
	msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
	buf := bytes.NewBuffer([]byte{})
	binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
	binary.Write(buf, binary.BigEndian, msg.Data[:])
	return buf.Bytes()
}

// aerospikeInfoCommand wraps a received message for response parsing.
type aerospikeInfoCommand struct {
	msg *aerospikeMessage
}
|
||||||
|
|
||||||
|
// Taken from aerospike-client-go/info.go
|
||||||
|
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
|
||||||
|
responses := make(map[string]string)
|
||||||
|
offset := int64(0)
|
||||||
|
begin := int64(0)
|
||||||
|
|
||||||
|
dataLen := int64(len(nfo.msg.Data))
|
||||||
|
|
||||||
|
// Create reusable StringBuilder for performance.
|
||||||
|
for offset < dataLen {
|
||||||
|
b := nfo.msg.Data[offset]
|
||||||
|
|
||||||
|
if b == '\t' {
|
||||||
|
name := nfo.msg.Data[begin:offset]
|
||||||
|
offset++
|
||||||
|
begin = offset
|
||||||
|
|
||||||
|
// Parse field value.
|
||||||
|
for offset < dataLen {
|
||||||
|
if nfo.msg.Data[offset] == '\n' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset > begin {
|
||||||
|
value := nfo.msg.Data[begin:offset]
|
||||||
|
responses[string(name)] = string(value)
|
||||||
|
} else {
|
||||||
|
responses[string(name)] = ""
|
||||||
|
}
|
||||||
|
offset++
|
||||||
|
begin = offset
|
||||||
|
} else if b == '\n' {
|
||||||
|
if offset > begin {
|
||||||
|
name := nfo.msg.Data[begin:offset]
|
||||||
|
responses[string(name)] = ""
|
||||||
|
}
|
||||||
|
offset++
|
||||||
|
begin = offset
|
||||||
|
} else {
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if offset > begin {
|
||||||
|
name := nfo.msg.Data[begin:offset]
|
||||||
|
responses[string(name)] = ""
|
||||||
|
}
|
||||||
|
return responses, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aerospike reads node-level and per-namespace statistics from one or
// more Aerospike servers.
type Aerospike struct {
	// Servers lists "host:port" addresses to query; Gather falls back to
	// 127.0.0.1:3000 when empty.
	Servers []string
}

var sampleConfig = `
# Aerospike servers to connect to (with port)
# Default: servers = ["localhost:3000"]
#
# This plugin will query all namespaces the aerospike
# server has configured and get stats for them.
servers = ["localhost:3000"]
`
||||||
|
|
||||||
|
// SampleConfig returns the example configuration snippet for this plugin.
func (a *Aerospike) SampleConfig() string {
	return sampleConfig
}

// Description returns a one-line summary shown in generated configs.
func (a *Aerospike) Description() string {
	return "Read stats from an aerospike server"
}
|
||||||
|
|
||||||
|
func (a *Aerospike) Gather(acc plugins.Accumulator) error {
|
||||||
|
if len(a.Servers) == 0 {
|
||||||
|
return a.gatherServer("127.0.0.1:3000", acc)
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
var outerr error
|
||||||
|
|
||||||
|
for _, server := range a.Servers {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(server string) {
|
||||||
|
defer wg.Done()
|
||||||
|
outerr = a.gatherServer(server, acc)
|
||||||
|
}(server)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
return outerr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error {
|
||||||
|
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Aerospike info failed: %s", err)
|
||||||
|
}
|
||||||
|
readAerospikeStats(aerospikeInfo, acc, host, "")
|
||||||
|
namespaces, err := getList(NAMESPACES_COMMAND, host)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Aerospike namespace list failed: %s", err)
|
||||||
|
}
|
||||||
|
for ix := range namespaces {
|
||||||
|
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
|
||||||
|
}
|
||||||
|
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getMap(key []byte, host string) (map[string]string, error) {
|
||||||
|
data, err := get(key, host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||||
|
}
|
||||||
|
parsed, err := unmarshalMapInfo(data, string(key))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return parsed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getList(key []byte, host string) ([]string, error) {
|
||||||
|
data, err := get(key, host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to get data: %s", err)
|
||||||
|
}
|
||||||
|
parsed, err := unmarshalListInfo(data, string(key))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return parsed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// get sends one info command to an Aerospike node over a raw TCP
// connection and returns the parsed multi-line response.
func get(key []byte, host string) (map[string]string, error) {
	var err error
	var data map[string]string

	// Wrap the command bytes in an info request message.
	asInfo := &aerospikeInfoCommand{
		msg: &aerospikeMessage{
			aerospikeMessageHeader: aerospikeMessageHeader{
				Version: uint8(MSG_VERSION),
				Type:    uint8(MSG_TYPE),
				DataLen: msgLenToBytes(int64(len(key))),
			},
			Data: key,
		},
	}

	cmd := asInfo.msg.Serialize()
	addr, err := net.ResolveTCPAddr("tcp", host)
	if err != nil {
		return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
	}

	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
	}
	defer conn.Close()

	_, err = conn.Write(cmd)
	if err != nil {
		return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
	}

	// Read the fixed-size response header first...
	msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
	_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
	if err != nil {
		return data, fmt.Errorf("Failed to read header: %s", err)
	}
	err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
	if err != nil {
		return data, fmt.Errorf("Failed to unmarshal header: %s", err)
	}

	// ...then size the payload buffer from the header's 48-bit length.
	msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)

	if int64(len(asInfo.msg.Data)) != msgLen {
		asInfo.msg.Data = make([]byte, msgLen)
	}

	_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
	if err != nil {
		return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
	}

	data, err = asInfo.parseMultiResponse()
	if err != nil {
		return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
	}

	return data, err
}
|
||||||
|
|
||||||
|
func readAerospikeStats(
|
||||||
|
stats map[string]string,
|
||||||
|
acc plugins.Accumulator,
|
||||||
|
host string,
|
||||||
|
namespace string,
|
||||||
|
) {
|
||||||
|
fields := make(map[string]interface{})
|
||||||
|
tags := map[string]string{
|
||||||
|
"aerospike_host": host,
|
||||||
|
"namespace": "_service",
|
||||||
|
}
|
||||||
|
|
||||||
|
if namespace != "" {
|
||||||
|
tags["namespace"] = namespace
|
||||||
|
}
|
||||||
|
for key, value := range stats {
|
||||||
|
// We are going to ignore all string based keys
|
||||||
|
val, err := strconv.ParseInt(value, 10, 64)
|
||||||
|
if err == nil {
|
||||||
|
if strings.Contains(key, "-") {
|
||||||
|
key = strings.Replace(key, "-", "_", -1)
|
||||||
|
}
|
||||||
|
fields[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
acc.AddFields("aerospike", fields, tags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalMapInfo extracts the semicolon-separated "k=v" list stored
// under key in infoMap and unmarshals it into a map.
//
// A trailing newline on key is stripped before lookup. Entries without
// an '=' are skipped. When key is absent an error is returned together
// with the (empty) map, so callers may use the map unconditionally.
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
	key = strings.TrimSuffix(key, "\n")
	res := map[string]string{}

	v, exists := infoMap[key]
	if !exists {
		return res, fmt.Errorf("Key '%s' missing from info", key)
	}

	for _, pair := range strings.Split(v, ";") {
		// SplitN keeps any '=' inside the value intact; the original
		// plain Split silently truncated "a=b=c" to value "b".
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) == 2 {
			res[kv[0]] = kv[1]
		}
	}

	return res, nil
}
|
||||||
|
|
||||||
|
// unmarshalListInfo looks up key (with any trailing newline removed) in
// infoMap and splits the stored value on ';' into a list. A missing key
// yields an empty slice and a descriptive error.
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
	trimmed := strings.TrimSuffix(key, "\n")

	raw, ok := infoMap[trimmed]
	if !ok {
		return []string{}, fmt.Errorf("Key '%s' missing from info", trimmed)
	}

	return strings.Split(raw, ";"), nil
}
|
||||||
|
|
||||||
|
// readLenFromConn reads from c until exactly length bytes have been
// stored in buffer[:length] or a read fails. It returns the number of
// bytes actually read together with the first read error, if any; a
// short read therefore reports both a count and a non-nil error.
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
	for total < length && err == nil {
		var n int
		n, err = c.Read(buffer[total:length])
		total += n
	}
	return total, err
}
|
||||||
|
|
||||||
|
// msgLenToBytes encodes DataLen as the 6-byte big-endian length field
// used by the Aerospike wire protocol (the low 48 bits of the value).
// Taken from aerospike-client-go/types/message.go
func msgLenToBytes(DataLen int64) [6]byte {
	// Encode into a fixed 8-byte scratch array (no heap allocation),
	// then keep the low six bytes.
	var full [8]byte
	binary.BigEndian.PutUint64(full[:], uint64(DataLen))

	var res [6]byte
	copy(res[:], full[2:])
	return res
}
|
||||||
|
|
||||||
|
// msgLenFromBytes decodes the 6-byte big-endian length field of an
// Aerospike protocol header back into an int64.
// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
	// Right-align the six bytes in a fixed 8-byte array (avoids the
	// slice allocation an append would make) and decode as uint64.
	var full [8]byte
	copy(full[2:], buf[:])
	return int64(binary.BigEndian.Uint64(full[:]))
}
|
||||||
|
|
||||||
|
// init registers the Aerospike input with the Telegraf plugin registry
// under the name "aerospike", supplying a factory that returns a fresh
// zero-value plugin instance.
func init() {
	plugins.Add("aerospike", func() plugins.Plugin {
		return &Aerospike{}
	})
}
|
||||||
112
plugins/aerospike/aerospike_test.go
Normal file
112
plugins/aerospike/aerospike_test.go
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
package aerospike
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdb/telegraf/testutil"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAerospikeStatistics(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping integration test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
a := &Aerospike{
|
||||||
|
Servers: []string{testutil.GetLocalHost() + ":3000"},
|
||||||
|
}
|
||||||
|
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
|
||||||
|
err := a.Gather(&acc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Only use a few of the metrics
|
||||||
|
asMetrics := []string{
|
||||||
|
"transactions",
|
||||||
|
"stat_write_errs",
|
||||||
|
"stat_read_reqs",
|
||||||
|
"stat_write_reqs",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, metric := range asMetrics {
|
||||||
|
assert.True(t, acc.HasIntValue(metric), metric)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
|
||||||
|
var i int64 = 8
|
||||||
|
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
|
||||||
|
// Also test for re-writing
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
stats := map[string]string{
|
||||||
|
"stat-write-errs": "12345",
|
||||||
|
"stat_read_reqs": "12345",
|
||||||
|
}
|
||||||
|
readAerospikeStats(stats, &acc, "host1", "")
|
||||||
|
for k := range stats {
|
||||||
|
if k == "stat-write-errs" {
|
||||||
|
k = "stat_write_errs"
|
||||||
|
}
|
||||||
|
assert.True(t, acc.HasMeasurement(k))
|
||||||
|
assert.True(t, acc.CheckValue(k, int64(12345)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadAerospikeStatsNamespace(t *testing.T) {
|
||||||
|
var acc testutil.Accumulator
|
||||||
|
stats := map[string]string{
|
||||||
|
"stat_write_errs": "12345",
|
||||||
|
"stat_read_reqs": "12345",
|
||||||
|
}
|
||||||
|
readAerospikeStats(stats, &acc, "host1", "test")
|
||||||
|
|
||||||
|
tags := map[string]string{
|
||||||
|
"aerospike_host": "host1",
|
||||||
|
"namespace": "test",
|
||||||
|
}
|
||||||
|
for k := range stats {
|
||||||
|
assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAerospikeUnmarshalList(t *testing.T) {
|
||||||
|
i := map[string]string{
|
||||||
|
"test": "one;two;three",
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := []string{"one", "two", "three"}
|
||||||
|
|
||||||
|
list, err := unmarshalListInfo(i, "test2")
|
||||||
|
assert.True(t, err != nil)
|
||||||
|
|
||||||
|
list, err = unmarshalListInfo(i, "test")
|
||||||
|
assert.True(t, err == nil)
|
||||||
|
equal := true
|
||||||
|
for ix := range expected {
|
||||||
|
if list[ix] != expected[ix] {
|
||||||
|
equal = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, equal)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAerospikeUnmarshalMap(t *testing.T) {
|
||||||
|
i := map[string]string{
|
||||||
|
"test": "key1=value1;key2=value2",
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := map[string]string{
|
||||||
|
"key1": "value1",
|
||||||
|
"key2": "value2",
|
||||||
|
}
|
||||||
|
m, err := unmarshalMapInfo(i, "test")
|
||||||
|
assert.True(t, err == nil)
|
||||||
|
assert.True(t, reflect.DeepEqual(m, expected))
|
||||||
|
}
|
||||||
37
plugins/all/all.go
Normal file
37
plugins/all/all.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package all
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/aerospike"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/apache"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/bcache"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/disque"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/elasticsearch"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/exec"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/haproxy"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/httpjson"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/influxdb"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/jolokia"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/kafka_consumer"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/leofs"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/lustre2"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/mailchimp"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/memcached"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/mongodb"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/mysql"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/nginx"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/phpfpm"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/ping"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/postgresql"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/procstat"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/prometheus"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/puppetagent"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/rabbitmq"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/redis"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/rethinkdb"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/statsd"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/system"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/trig"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/twemproxy"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/zfs"
|
||||||
|
_ "github.com/influxdb/telegraf/plugins/zookeeper"
|
||||||
|
)
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
# Telegraf plugin: Apache
|
# Telegraf plugin: Apache
|
||||||
|
|
||||||
#### Plugin arguments:
|
#### Plugin arguments:
|
||||||
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
|
- **urls** []string: List of apache-status URLs to collect from.
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
@@ -8,10 +8,10 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdb/telegraf/plugins"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Apache struct {
|
type Apache struct {
|
||||||
@@ -19,8 +19,7 @@ type Apache struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var sampleConfig = `
|
var sampleConfig = `
|
||||||
## An array of Apache status URI to gather stats.
|
# An array of Apache status URI to gather stats.
|
||||||
## Default is "http://localhost/server-status?auto".
|
|
||||||
urls = ["http://localhost/server-status?auto"]
|
urls = ["http://localhost/server-status?auto"]
|
||||||
`
|
`
|
||||||
|
|
||||||
@@ -32,13 +31,9 @@ func (n *Apache) Description() string {
|
|||||||
return "Read Apache status information (mod_status)"
|
return "Read Apache status information (mod_status)"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
func (n *Apache) Gather(acc plugins.Accumulator) error {
|
||||||
if len(n.Urls) == 0 {
|
var wg sync.WaitGroup
|
||||||
n.Urls = []string{"http://localhost/server-status?auto"}
|
|
||||||
}
|
|
||||||
|
|
||||||
var outerr error
|
var outerr error
|
||||||
var errch = make(chan error)
|
|
||||||
|
|
||||||
for _, u := range n.Urls {
|
for _, u := range n.Urls {
|
||||||
addr, err := url.Parse(u)
|
addr, err := url.Parse(u)
|
||||||
@@ -46,17 +41,14 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
|
|||||||
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
|
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
go func(addr *url.URL) {
|
go func(addr *url.URL) {
|
||||||
errch <- n.gatherUrl(addr, acc)
|
defer wg.Done()
|
||||||
|
outerr = n.gatherUrl(addr, acc)
|
||||||
}(addr)
|
}(addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Drain channel, waiting for all requests to finish and save last error.
|
wg.Wait()
|
||||||
for range n.Urls {
|
|
||||||
if err := <-errch; err != nil {
|
|
||||||
outerr = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return outerr
|
return outerr
|
||||||
}
|
}
|
||||||
@@ -65,12 +57,9 @@ var tr = &http.Transport{
|
|||||||
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
ResponseHeaderTimeout: time.Duration(3 * time.Second),
|
||||||
}
|
}
|
||||||
|
|
||||||
var client = &http.Client{
|
var client = &http.Client{Transport: tr}
|
||||||
Transport: tr,
|
|
||||||
Timeout: time.Duration(4 * time.Second),
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
|
func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
|
||||||
resp, err := client.Get(addr.String())
|
resp, err := client.Get(addr.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
|
||||||
@@ -175,7 +164,7 @@ func getTags(addr *url.URL) map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
inputs.Add("apache", func() telegraf.Input {
|
plugins.Add("apache", func() plugins.Plugin {
|
||||||
return &Apache{}
|
return &Apache{}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user