Compare commits
11 Commits
master
...
ShubhamDX-
Author | SHA1 | Date |
---|---|---|
shubhamDX | aa2d76afb6 | |
shubhamDX | d193a9416d | |
shubhamDX | dcf81d7dfe | |
Shubham Srivastava | dd75a883ee | |
shubhamDX | 42fd21c19d | |
Shubham Srivastava | f4255d331f | |
Shubham Srivastava | 68c453c355 | |
shubhamDX | 7069ef46b2 | |
shubhamDX | 44034d1b73 | |
Shubham Srivastava | a4cf3eb98b | |
shubhamDX | 99f494f0d8 |
|
@ -1,218 +1,48 @@
|
||||||
---
|
---
|
||||||
defaults:
|
defaults: &defaults
|
||||||
defaults: &defaults
|
docker:
|
||||||
|
- image: 'circleci/golang:1.9.2'
|
||||||
working_directory: '/go/src/github.com/influxdata/telegraf'
|
working_directory: '/go/src/github.com/influxdata/telegraf'
|
||||||
environment:
|
|
||||||
GOFLAGS: -p=8
|
|
||||||
go-1_13: &go-1_13
|
|
||||||
docker:
|
|
||||||
- image: 'quay.io/influxdb/telegraf-ci:1.13.11'
|
|
||||||
go-1_14: &go-1_14
|
|
||||||
docker:
|
|
||||||
- image: 'quay.io/influxdb/telegraf-ci:1.14.3'
|
|
||||||
mac: &mac
|
|
||||||
macos:
|
|
||||||
xcode: 11.3.1
|
|
||||||
working_directory: '~/go/src/github.com/influxdata/telegraf'
|
|
||||||
environment:
|
|
||||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
|
||||||
GOFLAGS: -p=8
|
|
||||||
|
|
||||||
version: 2
|
version: 2
|
||||||
jobs:
|
jobs:
|
||||||
deps:
|
build:
|
||||||
<<: [ *defaults, *go-1_14 ]
|
<<: *defaults
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- restore_cache:
|
- run: 'make ci-test'
|
||||||
key: go-mod-v1-{{ checksum "go.sum" }}
|
|
||||||
- run: 'make deps'
|
|
||||||
- run: 'make tidy'
|
|
||||||
- save_cache:
|
|
||||||
name: 'go module cache'
|
|
||||||
key: go-mod-v1-{{ checksum "go.sum" }}
|
|
||||||
paths:
|
|
||||||
- '/go/pkg/mod'
|
|
||||||
- persist_to_workspace:
|
|
||||||
root: '/go'
|
|
||||||
paths:
|
|
||||||
- '*'
|
|
||||||
macdeps:
|
|
||||||
<<: [ *mac ]
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- restore_cache:
|
|
||||||
key: mac-go-mod-v1-{{ checksum "go.sum" }}
|
|
||||||
- run: 'brew install go@1.13'
|
|
||||||
- run: 'make deps'
|
|
||||||
- run: 'make tidy'
|
|
||||||
- save_cache:
|
|
||||||
name: 'go module cache'
|
|
||||||
key: mac-go-mod-v1-{{ checksum "go.sum" }}
|
|
||||||
paths:
|
|
||||||
- '~/go/pkg/mod'
|
|
||||||
- '/usr/local/Cellar/go'
|
|
||||||
- '/usr/local/bin/go'
|
|
||||||
- '/usr/local/bin/gofmt'
|
|
||||||
- persist_to_workspace:
|
|
||||||
root: '/'
|
|
||||||
paths:
|
|
||||||
- 'usr/local/bin/go'
|
|
||||||
- 'usr/local/Cellar/go'
|
|
||||||
- 'usr/local/bin/gofmt'
|
|
||||||
- 'Users/distiller/go'
|
|
||||||
|
|
||||||
test-go-1.13:
|
|
||||||
<<: [ *defaults, *go-1_13 ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/go'
|
|
||||||
- run: 'make'
|
|
||||||
- run: 'make test'
|
|
||||||
test-go-1.13-386:
|
|
||||||
<<: [ *defaults, *go-1_13 ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/go'
|
|
||||||
- run: 'GOARCH=386 make'
|
|
||||||
- run: 'GOARCH=386 make test'
|
|
||||||
test-go-1.14:
|
|
||||||
<<: [ *defaults, *go-1_14 ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/go'
|
|
||||||
- run: 'make'
|
|
||||||
- run: 'make check'
|
|
||||||
- run: 'make check-deps'
|
|
||||||
- run: 'make test'
|
|
||||||
test-go-1.14-386:
|
|
||||||
<<: [ *defaults, *go-1_14 ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/go'
|
|
||||||
- run: 'GOARCH=386 make'
|
|
||||||
- run: 'GOARCH=386 make check'
|
|
||||||
- run: 'GOARCH=386 make test'
|
|
||||||
test-go-1.13-darwin:
|
|
||||||
<<: [ *mac ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/'
|
|
||||||
- run: 'make'
|
|
||||||
- run: 'make check'
|
|
||||||
- run: 'make test'
|
|
||||||
|
|
||||||
package:
|
|
||||||
<<: [ *defaults, *go-1_14 ]
|
|
||||||
steps:
|
|
||||||
- attach_workspace:
|
|
||||||
at: '/go'
|
|
||||||
- run: 'make package'
|
|
||||||
- store_artifacts:
|
|
||||||
path: './build'
|
|
||||||
destination: 'build'
|
|
||||||
release:
|
release:
|
||||||
<<: [ *defaults, *go-1_14 ]
|
<<: *defaults
|
||||||
steps:
|
steps:
|
||||||
- attach_workspace:
|
- checkout
|
||||||
at: '/go'
|
- run: './scripts/release.sh'
|
||||||
- run: 'make package-release'
|
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: './build'
|
path: './artifacts'
|
||||||
destination: 'build'
|
destination: '.'
|
||||||
nightly:
|
nightly:
|
||||||
<<: [ *defaults, *go-1_14 ]
|
<<: *defaults
|
||||||
steps:
|
steps:
|
||||||
- attach_workspace:
|
- checkout
|
||||||
at: '/go'
|
- run: './scripts/release.sh'
|
||||||
- run: 'make package-nightly'
|
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: './build'
|
path: './artifacts'
|
||||||
destination: 'build'
|
destination: '.'
|
||||||
|
|
||||||
workflows:
|
workflows:
|
||||||
version: 2
|
version: 2
|
||||||
check:
|
build_and_release:
|
||||||
jobs:
|
jobs:
|
||||||
- 'macdeps':
|
- 'build'
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'deps':
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'test-go-1.13':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'test-go-1.13-386':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'test-go-1.14':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'test-go-1.14-386':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
- 'test-go-1.13-darwin':
|
|
||||||
requires:
|
|
||||||
- 'macdeps'
|
|
||||||
filters:
|
|
||||||
tags: # only runs on tags if you specify this filter
|
|
||||||
only: /.*/
|
|
||||||
- 'package':
|
|
||||||
requires:
|
|
||||||
- 'test-go-1.13'
|
|
||||||
- 'test-go-1.13-386'
|
|
||||||
- 'test-go-1.14'
|
|
||||||
- 'test-go-1.14-386'
|
|
||||||
- 'release':
|
- 'release':
|
||||||
requires:
|
requires:
|
||||||
- 'test-go-1.13'
|
- 'build'
|
||||||
- 'test-go-1.13-386'
|
|
||||||
- 'test-go-1.14'
|
|
||||||
- 'test-go-1.14-386'
|
|
||||||
filters:
|
|
||||||
tags:
|
|
||||||
only: /.*/
|
|
||||||
branches:
|
|
||||||
ignore: /.*/
|
|
||||||
nightly:
|
nightly:
|
||||||
jobs:
|
jobs:
|
||||||
- 'deps'
|
- 'build'
|
||||||
- 'test-go-1.13':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
- 'test-go-1.13-386':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
- 'test-go-1.14':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
- 'test-go-1.14-386':
|
|
||||||
requires:
|
|
||||||
- 'deps'
|
|
||||||
- 'nightly':
|
- 'nightly':
|
||||||
requires:
|
requires:
|
||||||
- 'test-go-1.13'
|
- 'build'
|
||||||
- 'test-go-1.13-386'
|
|
||||||
- 'test-go-1.14'
|
|
||||||
- 'test-go-1.14-386'
|
|
||||||
triggers:
|
triggers:
|
||||||
- schedule:
|
- schedule:
|
||||||
cron: "0 7 * * *"
|
cron: "0 0 * * *"
|
||||||
filters:
|
filters:
|
||||||
branches:
|
branches:
|
||||||
only:
|
only:
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
CHANGELOG.md merge=union
|
CHANGELOG.md merge=union
|
||||||
README.md merge=union
|
README.md merge=union
|
||||||
go.sum merge=union
|
|
||||||
plugins/inputs/all/all.go merge=union
|
plugins/inputs/all/all.go merge=union
|
||||||
plugins/outputs/all/all.go merge=union
|
plugins/outputs/all/all.go merge=union
|
||||||
|
|
|
@ -0,0 +1,44 @@
|
||||||
|
## Directions
|
||||||
|
|
||||||
|
GitHub Issues are reserved for actionable bug reports and feature requests.
|
||||||
|
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
|
||||||
|
|
||||||
|
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
|
||||||
|
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.
|
||||||
|
Erase the other section and everything on and above this line.
|
||||||
|
|
||||||
|
*Please note, the quickest way to fix a bug is to open a Pull Request.*
|
||||||
|
|
||||||
|
## Bug report
|
||||||
|
|
||||||
|
### Relevant telegraf.conf:
|
||||||
|
|
||||||
|
### System info:
|
||||||
|
|
||||||
|
[Include Telegraf version, operating system name, and other relevant details]
|
||||||
|
|
||||||
|
### Steps to reproduce:
|
||||||
|
|
||||||
|
1. ...
|
||||||
|
2. ...
|
||||||
|
|
||||||
|
### Expected behavior:
|
||||||
|
|
||||||
|
### Actual behavior:
|
||||||
|
|
||||||
|
### Additional info:
|
||||||
|
|
||||||
|
[Include gist of relevant config, logs, etc.]
|
||||||
|
|
||||||
|
|
||||||
|
## Feature Request
|
||||||
|
|
||||||
|
Opening a feature request kicks off a discussion.
|
||||||
|
|
||||||
|
### Proposal:
|
||||||
|
|
||||||
|
### Current behavior:
|
||||||
|
|
||||||
|
### Desired behavior:
|
||||||
|
|
||||||
|
### Use case: [Why is this important (helps with prioritizing requests)]
|
|
@ -1,45 +0,0 @@
|
||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
|
|
||||||
---
|
|
||||||
<!--
|
|
||||||
Please redirect any questions about Telegraf usage to the InfluxData Community
|
|
||||||
site: https://community.influxdata.com
|
|
||||||
|
|
||||||
Check the documentation for the related plugin including the troubleshooting
|
|
||||||
section if available.
|
|
||||||
-->
|
|
||||||
|
|
||||||
### Relevant telegraf.conf:
|
|
||||||
<!-- Place config in the toml code section. -->
|
|
||||||
```toml
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
### System info:
|
|
||||||
|
|
||||||
<!-- Include Telegraf version, operating system, and other relevant details -->
|
|
||||||
|
|
||||||
### Docker
|
|
||||||
|
|
||||||
<!-- If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against -->
|
|
||||||
|
|
||||||
### Steps to reproduce:
|
|
||||||
|
|
||||||
<!-- Describe the steps to reproduce the bug. -->
|
|
||||||
|
|
||||||
1. ...
|
|
||||||
2. ...
|
|
||||||
|
|
||||||
### Expected behavior:
|
|
||||||
|
|
||||||
<!-- Describe what you expected to happen when you performed the above steps. -->
|
|
||||||
|
|
||||||
### Actual behavior:
|
|
||||||
|
|
||||||
<!-- Describe what actually happened when you performed the above steps. -->
|
|
||||||
|
|
||||||
### Additional info:
|
|
||||||
|
|
||||||
<!-- Include gist of relevant config, logs, etc. -->
|
|
|
@ -1,17 +0,0 @@
|
||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Suggest an idea for this project
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Feature Request
|
|
||||||
|
|
||||||
Opening a feature request kicks off a discussion.
|
|
||||||
|
|
||||||
### Proposal:
|
|
||||||
|
|
||||||
### Current behavior:
|
|
||||||
|
|
||||||
### Desired behavior:
|
|
||||||
|
|
||||||
### Use case: <!-- [Why is this important (helps with prioritizing requests)] -->
|
|
|
@ -1,5 +1,5 @@
|
||||||
/build
|
build
|
||||||
/telegraf
|
/telegraf
|
||||||
/telegraf.exe
|
|
||||||
/telegraf.gz
|
/telegraf.gz
|
||||||
/vendor
|
*~
|
||||||
|
*#
|
||||||
|
|
1356
CHANGELOG.md
1356
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
507
CONTRIBUTING.md
507
CONTRIBUTING.md
|
@ -1,62 +1,484 @@
|
||||||
### Contributing
|
## Steps for Contributing:
|
||||||
|
|
||||||
1. [Sign the CLA][cla].
|
1. [Sign the CLA](http://influxdb.com/community/cla.html)
|
||||||
1. Open a [new issue][] to discuss the changes you would like to make. This is
|
1. Make changes or write plugin (see below for details)
|
||||||
not strictly required but it may help reduce the amount of rework you need
|
1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go`
|
||||||
to do later.
|
1. If your plugin requires a new Go package,
|
||||||
1. Make changes or write plugin using the guidelines in the following
|
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
|
||||||
documents:
|
1. Write a README for your plugin, if it's an input plugin, it should be structured
|
||||||
- [Input Plugins][inputs]
|
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
|
||||||
- [Processor Plugins][processors]
|
Output plugins READMEs are less structured,
|
||||||
- [Aggregator Plugins][aggregators]
|
but any information you can provide on how the data will look is appreciated.
|
||||||
- [Output Plugins][outputs]
|
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
|
||||||
1. Ensure you have added proper unit tests and documentation.
|
for a good example.
|
||||||
1. Open a new [pull request][].
|
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
|
||||||
|
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
|
||||||
|
|
||||||
#### Contributing an External Plugin *(experimental)*
|
## GoDoc
|
||||||
Input plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd) without having to change the plugin code.
|
|
||||||
|
|
||||||
Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) to easily compile it as a separate app and run it from the inputs.execd plugin.
|
|
||||||
|
|
||||||
#### Security Vulnerability Reporting
|
|
||||||
InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our
|
|
||||||
open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about
|
|
||||||
security vulnerability reporting,
|
|
||||||
including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/).
|
|
||||||
|
|
||||||
### GoDoc
|
|
||||||
|
|
||||||
Public interfaces for inputs, outputs, processors, aggregators, metrics,
|
Public interfaces for inputs, outputs, processors, aggregators, metrics,
|
||||||
and the accumulator can be found in the GoDoc:
|
and the accumulator can be found on the GoDoc
|
||||||
|
|
||||||
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
|
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
|
||||||
|
|
||||||
### Common development tasks
|
## Sign the CLA
|
||||||
|
|
||||||
**Adding a dependency:**
|
Before we can merge a pull request, you will need to sign the CLA,
|
||||||
|
which can be found [on our website](http://influxdb.com/community/cla.html)
|
||||||
|
|
||||||
Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory:
|
## Adding a dependency
|
||||||
|
|
||||||
1. `go get github.com/[dependency]/[new-package]`
|
Assuming you can already build the project, run these in the telegraf directory:
|
||||||
|
|
||||||
**Unit Tests:**
|
1. `go get github.com/sparrc/gdm`
|
||||||
|
1. `gdm restore`
|
||||||
|
1. `GOOS=linux gdm save`
|
||||||
|
|
||||||
|
## Input Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create new collection inputs.
|
||||||
|
Telegraf is entirely plugin driven. This interface allows for operators to
|
||||||
|
pick and chose what is gathered and makes it easy for developers
|
||||||
|
to create new ways of generating metrics.
|
||||||
|
|
||||||
|
Plugin authorship is kept as simple as possible to promote people to develop
|
||||||
|
and submit new inputs.
|
||||||
|
|
||||||
|
### Input Plugin Guidelines
|
||||||
|
|
||||||
|
* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface.
|
||||||
|
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
|
||||||
|
See below for a quick example.
|
||||||
|
* Input Plugins must be added to the
|
||||||
|
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
|
||||||
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
|
plugin can be configured. This is include in `telegraf config`.
|
||||||
|
* The `Description` function should say in one line what this plugin does.
|
||||||
|
|
||||||
|
Let's say you've written a plugin that emits metrics about processes on the
|
||||||
|
current host.
|
||||||
|
|
||||||
|
### Input Plugin Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package simple
|
||||||
|
|
||||||
|
// simple.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Simple struct {
|
||||||
|
Ok bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Description() string {
|
||||||
|
return "a demo plugin"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) SampleConfig() string {
|
||||||
|
return `
|
||||||
|
## Indicate if everything is fine
|
||||||
|
ok = true
|
||||||
|
`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Gather(acc telegraf.Accumulator) error {
|
||||||
|
if s.Ok {
|
||||||
|
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
|
||||||
|
} else {
|
||||||
|
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Adding Typed Metrics
|
||||||
|
|
||||||
|
In addition the the `AddFields` function, the accumulator also supports an
|
||||||
|
`AddGauge` and `AddCounter` function. These functions are for adding _typed_
|
||||||
|
metrics. Metric types are ignored for the InfluxDB output, but can be used
|
||||||
|
for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/).
|
||||||
|
|
||||||
|
## Input Plugins Accepting Arbitrary Data Formats
|
||||||
|
|
||||||
|
Some input plugins (such as
|
||||||
|
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
|
||||||
|
accept arbitrary input data formats. An overview of these data formats can
|
||||||
|
be found
|
||||||
|
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
|
||||||
|
|
||||||
|
In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
|
||||||
|
function on the plugin object (see the exec plugin for an example), as well as
|
||||||
|
defining `parser` as a field of the object.
|
||||||
|
|
||||||
|
You can then utilize the parser internally in your plugin, parsing data as you
|
||||||
|
see fit. Telegraf's configuration layer will take care of instantiating and
|
||||||
|
creating the `Parser` object.
|
||||||
|
|
||||||
|
You should also add the following to your SampleConfig() return:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "influx"
|
||||||
|
```
|
||||||
|
|
||||||
|
Below is the `Parser` interface.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Parser is an interface defining functions that a parser plugin must satisfy.
|
||||||
|
type Parser interface {
|
||||||
|
// Parse takes a byte buffer separated by newlines
|
||||||
|
// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
|
||||||
|
// and parses it into telegraf metrics
|
||||||
|
Parse(buf []byte) ([]telegraf.Metric, error)
|
||||||
|
|
||||||
|
// ParseLine takes a single string metric
|
||||||
|
// ie, "cpu.usage.idle 90"
|
||||||
|
// and parses it into a telegraf metric.
|
||||||
|
ParseLine(line string) (telegraf.Metric, error)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
And you can view the code
|
||||||
|
[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go)
|
||||||
|
|
||||||
|
## Service Input Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create new "service" collection
|
||||||
|
inputs. A service plugin differs from a regular plugin in that it operates
|
||||||
|
a background service while Telegraf is running. One example would be the `statsd`
|
||||||
|
plugin, which operates a statsd server.
|
||||||
|
|
||||||
|
Service Input Plugins are substantially more complicated than a regular plugin, as they
|
||||||
|
will require threads and locks to verify data integrity. Service Input Plugins should
|
||||||
|
be avoided unless there is no way to create their behavior with a regular plugin.
|
||||||
|
|
||||||
|
Their interface is quite similar to a regular plugin, with the addition of `Start()`
|
||||||
|
and `Stop()` methods.
|
||||||
|
|
||||||
|
### Service Plugin Guidelines
|
||||||
|
|
||||||
|
* Same as the `Plugin` guidelines, except that they must conform to the
|
||||||
|
`inputs.ServiceInput` interface.
|
||||||
|
|
||||||
|
## Output Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create a new output sink. Outputs
|
||||||
|
are created in a similar manner as collection plugins, and their interface has
|
||||||
|
similar constructs.
|
||||||
|
|
||||||
|
### Output Plugin Guidelines
|
||||||
|
|
||||||
|
* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface.
|
||||||
|
* Outputs should call `outputs.Add` in their `init` function to register themselves.
|
||||||
|
See below for a quick example.
|
||||||
|
* To be available within Telegraf itself, plugins must add themselves to the
|
||||||
|
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
|
||||||
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
|
output can be configured. This is include in `telegraf config`.
|
||||||
|
* The `Description` function should say in one line what this output does.
|
||||||
|
|
||||||
|
### Output Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package simpleoutput
|
||||||
|
|
||||||
|
// simpleoutput.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/outputs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Simple struct {
|
||||||
|
Ok bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Description() string {
|
||||||
|
return "a demo output"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) SampleConfig() string {
|
||||||
|
return `
|
||||||
|
ok = true
|
||||||
|
`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Connect() error {
|
||||||
|
// Make a connection to the URL here
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Close() error {
|
||||||
|
// Close connection to the URL here
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Simple) Write(metrics []telegraf.Metric) error {
|
||||||
|
for _, metric := range metrics {
|
||||||
|
// write `metric` to the output sink here
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output Plugins Writing Arbitrary Data Formats
|
||||||
|
|
||||||
|
Some output plugins (such as
|
||||||
|
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
|
||||||
|
can write arbitrary output data formats. An overview of these data formats can
|
||||||
|
be found
|
||||||
|
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
|
||||||
|
|
||||||
|
In order to enable this, you must specify a
|
||||||
|
`SetSerializer(serializer serializers.Serializer)`
|
||||||
|
function on the plugin object (see the file plugin for an example), as well as
|
||||||
|
defining `serializer` as a field of the object.
|
||||||
|
|
||||||
|
You can then utilize the serializer internally in your plugin, serializing data
|
||||||
|
before it's written. Telegraf's configuration layer will take care of
|
||||||
|
instantiating and creating the `Serializer` object.
|
||||||
|
|
||||||
|
You should also add the following to your SampleConfig() return:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
## Data format to output.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||||
|
data_format = "influx"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Service Output Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create new "service" output. A
|
||||||
|
service output differs from a regular output in that it operates a background service
|
||||||
|
while Telegraf is running. One example would be the `prometheus_client` output,
|
||||||
|
which operates an HTTP server.
|
||||||
|
|
||||||
|
Their interface is quite similar to a regular output, with the addition of `Start()`
|
||||||
|
and `Stop()` methods.
|
||||||
|
|
||||||
|
### Service Output Guidelines
|
||||||
|
|
||||||
|
* Same as the `Output` guidelines, except that they must conform to the
|
||||||
|
`output.ServiceOutput` interface.
|
||||||
|
|
||||||
|
## Processor Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create a new processor plugin.
|
||||||
|
|
||||||
|
### Processor Plugin Guidelines
|
||||||
|
|
||||||
|
* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface.
|
||||||
|
* Processors should call `processors.Add` in their `init` function to register themselves.
|
||||||
|
See below for a quick example.
|
||||||
|
* To be available within Telegraf itself, plugins must add themselves to the
|
||||||
|
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
|
||||||
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
|
processor can be configured. This is include in the output of `telegraf config`.
|
||||||
|
* The `Description` function should say in one line what this processor does.
|
||||||
|
|
||||||
|
### Processor Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package printer
|
||||||
|
|
||||||
|
// printer.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/processors"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Printer struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
`
|
||||||
|
|
||||||
|
func (p *Printer) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) Description() string {
|
||||||
|
return "Print all metrics that pass through this filter."
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
||||||
|
for _, metric := range in {
|
||||||
|
fmt.Println(metric.String())
|
||||||
|
}
|
||||||
|
return in
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
processors.Add("printer", func() telegraf.Processor {
|
||||||
|
return &Printer{}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Aggregator Plugins
|
||||||
|
|
||||||
|
This section is for developers who want to create a new aggregator plugin.
|
||||||
|
|
||||||
|
### Aggregator Plugin Guidelines
|
||||||
|
|
||||||
|
* A aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface.
|
||||||
|
* Aggregators should call `aggregators.Add` in their `init` function to register themselves.
|
||||||
|
See below for a quick example.
|
||||||
|
* To be available within Telegraf itself, plugins must add themselves to the
|
||||||
|
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
|
||||||
|
* The `SampleConfig` function should return valid toml that describes how the
|
||||||
|
aggregator can be configured. This is include in `telegraf config`.
|
||||||
|
* The `Description` function should say in one line what this aggregator does.
|
||||||
|
* The Aggregator plugin will need to keep caches of metrics that have passed
|
||||||
|
through it. This should be done using the builtin `HashID()` function of each
|
||||||
|
metric.
|
||||||
|
* When the `Reset()` function is called, all caches should be cleared.
|
||||||
|
|
||||||
|
### Aggregator Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
package min
|
||||||
|
|
||||||
|
// min.go
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/plugins/aggregators"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Min struct {
|
||||||
|
// caches for metric fields, names, and tags
|
||||||
|
fieldCache map[uint64]map[string]float64
|
||||||
|
nameCache map[uint64]string
|
||||||
|
tagCache map[uint64]map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMin() telegraf.Aggregator {
|
||||||
|
m := &Min{}
|
||||||
|
m.Reset()
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
var sampleConfig = `
|
||||||
|
## period is the flush & clear interval of the aggregator.
|
||||||
|
period = "30s"
|
||||||
|
## If true drop_original will drop the original metrics and
|
||||||
|
## only send aggregates.
|
||||||
|
drop_original = false
|
||||||
|
`
|
||||||
|
|
||||||
|
func (m *Min) SampleConfig() string {
|
||||||
|
return sampleConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Min) Description() string {
|
||||||
|
return "Keep the aggregate min of each metric passing through."
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Min) Add(in telegraf.Metric) {
|
||||||
|
id := in.HashID()
|
||||||
|
if _, ok := m.nameCache[id]; !ok {
|
||||||
|
// hit an uncached metric, create caches for first time:
|
||||||
|
m.nameCache[id] = in.Name()
|
||||||
|
m.tagCache[id] = in.Tags()
|
||||||
|
m.fieldCache[id] = make(map[string]float64)
|
||||||
|
for k, v := range in.Fields() {
|
||||||
|
if fv, ok := convert(v); ok {
|
||||||
|
m.fieldCache[id][k] = fv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for k, v := range in.Fields() {
|
||||||
|
if fv, ok := convert(v); ok {
|
||||||
|
if _, ok := m.fieldCache[id][k]; !ok {
|
||||||
|
// hit an uncached field of a cached metric
|
||||||
|
m.fieldCache[id][k] = fv
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fv < m.fieldCache[id][k] {
|
||||||
|
// set new minimum
|
||||||
|
m.fieldCache[id][k] = fv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Min) Push(acc telegraf.Accumulator) {
|
||||||
|
for id, _ := range m.nameCache {
|
||||||
|
fields := map[string]interface{}{}
|
||||||
|
for k, v := range m.fieldCache[id] {
|
||||||
|
fields[k+"_min"] = v
|
||||||
|
}
|
||||||
|
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Min) Reset() {
|
||||||
|
m.fieldCache = make(map[uint64]map[string]float64)
|
||||||
|
m.nameCache = make(map[uint64]string)
|
||||||
|
m.tagCache = make(map[uint64]map[string]string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func convert(in interface{}) (float64, bool) {
|
||||||
|
switch v := in.(type) {
|
||||||
|
case float64:
|
||||||
|
return v, true
|
||||||
|
case int64:
|
||||||
|
return float64(v), true
|
||||||
|
default:
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
aggregators.Add("min", func() telegraf.Aggregator {
|
||||||
|
return NewMin()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Unit Tests
|
||||||
|
|
||||||
Before opening a pull request you should run the linter checks and
|
Before opening a pull request you should run the linter checks and
|
||||||
the short tests.
|
the short tests.
|
||||||
|
|
||||||
```
|
### Execute linter
|
||||||
make check
|
|
||||||
make test
|
|
||||||
```
|
|
||||||
|
|
||||||
**Execute integration tests:**
|
execute `make lint`
|
||||||
|
|
||||||
(Optional)
|
### Execute short tests
|
||||||
|
|
||||||
|
execute `make test`
|
||||||
|
|
||||||
|
### Execute integration tests
|
||||||
|
|
||||||
Running the integration tests requires several docker containers to be
|
Running the integration tests requires several docker containers to be
|
||||||
running. You can start the containers with:
|
running. You can start the containers with:
|
||||||
```
|
```
|
||||||
docker-compose up
|
make docker-run
|
||||||
```
|
```
|
||||||
|
|
||||||
And run the full test suite with:
|
And run the full test suite with:
|
||||||
|
@ -65,12 +487,3 @@ make test-all
|
||||||
```
|
```
|
||||||
|
|
||||||
Use `make docker-kill` to stop the containers.
|
Use `make docker-kill` to stop the containers.
|
||||||
|
|
||||||
|
|
||||||
[cla]: https://www.influxdata.com/legal/cla/
|
|
||||||
[new issue]: https://github.com/influxdata/telegraf/issues/new/choose
|
|
||||||
[pull request]: https://github.com/influxdata/telegraf/compare
|
|
||||||
[inputs]: /docs/INPUTS.md
|
|
||||||
[processors]: /docs/PROCESSORS.md
|
|
||||||
[aggregators]: /docs/AGGREGATORS.md
|
|
||||||
[outputs]: /docs/OUTPUTS.md
|
|
||||||
|
|
16
Dockerfile
16
Dockerfile
|
@ -1,16 +0,0 @@
|
||||||
# Copy of scripts/stretch.docker
|
|
||||||
FROM golang:1.13.8 as builder
|
|
||||||
WORKDIR /go/src/github.com/influxdata/telegraf
|
|
||||||
|
|
||||||
COPY . /go/src/github.com/influxdata/telegraf
|
|
||||||
RUN make go-install
|
|
||||||
|
|
||||||
FROM buildpack-deps:stretch-curl
|
|
||||||
COPY --from=builder /go/bin/* /usr/bin/
|
|
||||||
COPY etc/telegraf.conf /etc/telegraf/telegraf.conf
|
|
||||||
|
|
||||||
EXPOSE 8125/udp 8092/udp 8094
|
|
||||||
|
|
||||||
COPY scripts/docker-entrypoint.sh /entrypoint.sh
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["telegraf"]
|
|
|
@ -1,9 +0,0 @@
|
||||||
# External Plugins
|
|
||||||
|
|
||||||
This is a list of plugins that can be compiled outside of Telegraf and used via the execd input.
|
|
||||||
|
|
||||||
Pull requests welcome.
|
|
||||||
|
|
||||||
## Inputs
|
|
||||||
- [rand](https://github.com/ssoroka/rand) - Generate random numbers
|
|
||||||
- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts
|
|
|
@ -0,0 +1,96 @@
|
||||||
|
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
|
||||||
|
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
|
||||||
|
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
|
||||||
|
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
|
||||||
|
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
|
||||||
|
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
|
||||||
|
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
|
||||||
|
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
|
||||||
|
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
|
||||||
|
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
|
||||||
|
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
|
||||||
|
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||||
|
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
|
||||||
|
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
|
||||||
|
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
|
||||||
|
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
|
||||||
|
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
|
||||||
|
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
|
||||||
|
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
|
||||||
|
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
||||||
|
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||||
|
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
|
||||||
|
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
|
||||||
|
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
|
||||||
|
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||||
|
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
|
||||||
|
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
|
||||||
|
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
|
||||||
|
github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea
|
||||||
|
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
|
||||||
|
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
|
||||||
|
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
|
||||||
|
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
|
||||||
|
github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea
|
||||||
|
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
|
||||||
|
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
|
||||||
|
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
|
||||||
|
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||||
|
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
|
||||||
|
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
|
||||||
|
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
|
||||||
|
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||||
|
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
|
||||||
|
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
||||||
|
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
|
||||||
|
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
|
||||||
|
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
|
||||||
|
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
|
||||||
|
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||||
|
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
|
||||||
|
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
|
||||||
|
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
|
||||||
|
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
|
||||||
|
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
|
||||||
|
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
|
||||||
|
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
|
||||||
|
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
|
||||||
|
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
|
||||||
|
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||||
|
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
|
||||||
|
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
|
||||||
|
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||||
|
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
|
||||||
|
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
|
||||||
|
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||||
|
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
|
||||||
|
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
|
||||||
|
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
|
||||||
|
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
|
||||||
|
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
|
||||||
|
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
|
||||||
|
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
|
||||||
|
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
|
||||||
|
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
|
||||||
|
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
|
||||||
|
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
|
||||||
|
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
|
||||||
|
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
|
||||||
|
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
|
||||||
|
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
|
||||||
|
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
|
||||||
|
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
|
||||||
|
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
|
||||||
|
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
||||||
|
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||||
|
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||||
|
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
|
||||||
|
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||||
|
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
|
||||||
|
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
|
||||||
|
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
|
||||||
|
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
|
||||||
|
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
|
||||||
|
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
|
||||||
|
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
|
||||||
|
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
2
LICENSE
2
LICENSE
|
@ -1,6 +1,6 @@
|
||||||
The MIT License (MIT)
|
The MIT License (MIT)
|
||||||
|
|
||||||
Copyright (c) 2015-2019 InfluxData Inc.
|
Copyright (c) 2015 InfluxDB
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|
134
Makefile
134
Makefile
|
@ -1,155 +1,99 @@
|
||||||
ifeq ($(OS), Windows_NT)
|
|
||||||
VERSION := $(shell git describe --exact-match --tags 2>nul)
|
|
||||||
HOME := $(HOMEPATH)
|
|
||||||
CGO_ENABLED ?= 0
|
|
||||||
export CGO_ENABLED
|
|
||||||
else
|
|
||||||
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
|
|
||||||
endif
|
|
||||||
|
|
||||||
PREFIX := /usr/local
|
PREFIX := /usr/local
|
||||||
|
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
|
||||||
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
|
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
|
||||||
COMMIT := $(shell git rev-parse --short HEAD)
|
COMMIT := $(shell git rev-parse --short HEAD)
|
||||||
GOFILES ?= $(shell git ls-files '*.go')
|
GOFILES ?= $(shell git ls-files '*.go')
|
||||||
GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
|
GOFMT ?= $(shell gofmt -l $(GOFILES))
|
||||||
BUILDFLAGS ?=
|
|
||||||
|
|
||||||
ifdef GOBIN
|
ifdef GOBIN
|
||||||
PATH := $(GOBIN):$(PATH)
|
PATH := $(GOBIN):$(PATH)
|
||||||
else
|
else
|
||||||
PATH := $(subst :,/bin:,$(shell go env GOPATH))/bin:$(PATH)
|
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
TELEGRAF := telegraf$(shell go tool dist env | grep -q 'GOOS=.windows.' && echo .exe)
|
||||||
|
|
||||||
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
|
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
|
||||||
ifdef VERSION
|
ifdef VERSION
|
||||||
LDFLAGS += -X main.version=$(VERSION)
|
LDFLAGS += -X main.version=$(VERSION)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
.PHONY: all
|
|
||||||
all:
|
all:
|
||||||
@$(MAKE) --no-print-directory deps
|
$(MAKE) fmtcheck
|
||||||
@$(MAKE) --no-print-directory telegraf
|
$(MAKE) deps
|
||||||
|
$(MAKE) telegraf
|
||||||
|
|
||||||
|
ci-test:
|
||||||
|
$(MAKE) deps
|
||||||
|
$(MAKE) fmtcheck
|
||||||
|
$(MAKE) vet
|
||||||
|
$(MAKE) test
|
||||||
|
|
||||||
.PHONY: deps
|
|
||||||
deps:
|
deps:
|
||||||
go mod download
|
go get -u github.com/golang/lint/golint
|
||||||
|
go get github.com/sparrc/gdm
|
||||||
|
gdm restore
|
||||||
|
|
||||||
.PHONY: telegraf
|
|
||||||
telegraf:
|
telegraf:
|
||||||
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
|
go build -i -o $(TELEGRAF) -ldflags "$(LDFLAGS)" ./cmd/telegraf/telegraf.go
|
||||||
|
|
||||||
.PHONY: go-install
|
|
||||||
go-install:
|
go-install:
|
||||||
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
|
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
|
||||||
|
|
||||||
.PHONY: install
|
|
||||||
install: telegraf
|
install: telegraf
|
||||||
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
mkdir -p $(DESTDIR)$(PREFIX)/bin/
|
||||||
cp telegraf $(DESTDIR)$(PREFIX)/bin/
|
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
|
||||||
|
|
||||||
.PHONY: test
|
|
||||||
test:
|
test:
|
||||||
go test -short ./...
|
go test -short ./...
|
||||||
|
|
||||||
.PHONY: fmt
|
|
||||||
fmt:
|
fmt:
|
||||||
@gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
|
@gofmt -w $(GOFILES)
|
||||||
|
|
||||||
.PHONY: fmtcheck
|
|
||||||
fmtcheck:
|
fmtcheck:
|
||||||
@if [ ! -z "$(GOFMT)" ]; then \
|
@echo '[INFO] running gofmt to identify incorrectly formatted code...'
|
||||||
|
@if [ ! -z $(GOFMT) ]; then \
|
||||||
echo "[ERROR] gofmt has found errors in the following files:" ; \
|
echo "[ERROR] gofmt has found errors in the following files:" ; \
|
||||||
echo "$(GOFMT)" ; \
|
echo "$(GOFMT)" ; \
|
||||||
echo "" ;\
|
echo "" ;\
|
||||||
echo "Run make fmt to fix them." ; \
|
echo "Run make fmt to fix them." ; \
|
||||||
exit 1 ;\
|
exit 1 ;\
|
||||||
fi
|
fi
|
||||||
|
@echo '[INFO] done.'
|
||||||
|
|
||||||
|
lint:
|
||||||
|
golint ./...
|
||||||
|
|
||||||
.PHONY: test-windows
|
|
||||||
test-windows:
|
test-windows:
|
||||||
go test -short ./plugins/inputs/ping/...
|
go test ./plugins/inputs/ping/...
|
||||||
go test -short ./plugins/inputs/win_perf_counters/...
|
go test ./plugins/inputs/win_perf_counters/...
|
||||||
go test -short ./plugins/inputs/win_services/...
|
go test ./plugins/inputs/win_services/...
|
||||||
go test -short ./plugins/inputs/procstat/...
|
go test ./plugins/inputs/procstat/...
|
||||||
go test -short ./plugins/inputs/ntpq/...
|
|
||||||
go test -short ./plugins/processors/port_name/...
|
|
||||||
|
|
||||||
.PHONY: vet
|
# vet runs the Go source code static analysis tool `vet` to find
|
||||||
|
# any common errors.
|
||||||
vet:
|
vet:
|
||||||
@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
|
@echo 'go vet $$(go list ./...)'
|
||||||
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
|
@go vet $$(go list ./...) ; if [ $$? -eq 1 ]; then \
|
||||||
echo ""; \
|
echo ""; \
|
||||||
echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
|
echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
|
||||||
echo "to fix them before submitting code for review."; \
|
echo "to fix them before submitting code for review."; \
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
.PHONY: tidy
|
test-all: vet
|
||||||
tidy:
|
|
||||||
go mod verify
|
|
||||||
go mod tidy
|
|
||||||
@if ! git diff --quiet go.mod go.sum; then \
|
|
||||||
echo "please run go mod tidy and check in changes"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
.PHONY: check
|
|
||||||
check: fmtcheck vet
|
|
||||||
@$(MAKE) --no-print-directory tidy
|
|
||||||
|
|
||||||
.PHONY: test-all
|
|
||||||
test-all: fmtcheck vet
|
|
||||||
go test ./...
|
go test ./...
|
||||||
|
|
||||||
.PHONY: check-deps
|
|
||||||
check-deps:
|
|
||||||
./scripts/check-deps.sh
|
|
||||||
|
|
||||||
.PHONY: package
|
|
||||||
package:
|
package:
|
||||||
./scripts/build.py --package --platform=all --arch=all
|
./scripts/build.py --package --platform=all --arch=all
|
||||||
|
|
||||||
.PHONY: package-release
|
|
||||||
package-release:
|
|
||||||
./scripts/build.py --release --package --platform=all --arch=all \
|
|
||||||
--upload --bucket=dl.influxdata.com/telegraf/releases
|
|
||||||
|
|
||||||
.PHONY: package-nightly
|
|
||||||
package-nightly:
|
|
||||||
./scripts/build.py --nightly --package --platform=all --arch=all \
|
|
||||||
--upload --bucket=dl.influxdata.com/telegraf/nightlies
|
|
||||||
|
|
||||||
.PHONY: clean
|
|
||||||
clean:
|
clean:
|
||||||
rm -f telegraf
|
rm -f telegraf
|
||||||
rm -f telegraf.exe
|
rm -f telegraf.exe
|
||||||
|
|
||||||
.PHONY: docker-image
|
|
||||||
docker-image:
|
docker-image:
|
||||||
docker build -f scripts/stretch.docker -t "telegraf:$(COMMIT)" .
|
./scripts/build.py --package --platform=linux --arch=amd64
|
||||||
|
cp build/telegraf*$(COMMIT)*.deb .
|
||||||
|
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
|
||||||
|
|
||||||
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
|
.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck
|
||||||
ragel -Z -G2 $^ -o $@
|
|
||||||
|
|
||||||
.PHONY: static
|
|
||||||
static:
|
|
||||||
@echo "Building static linux binary..."
|
|
||||||
@CGO_ENABLED=0 \
|
|
||||||
GOOS=linux \
|
|
||||||
GOARCH=amd64 \
|
|
||||||
go build -ldflags "$(LDFLAGS)" ./cmd/telegraf
|
|
||||||
|
|
||||||
.PHONY: plugin-%
|
|
||||||
plugin-%:
|
|
||||||
@echo "Starting dev environment for $${$(@)} input plugin..."
|
|
||||||
@docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up
|
|
||||||
|
|
||||||
.PHONY: ci-1.13
|
|
||||||
ci-1.13:
|
|
||||||
docker build -t quay.io/influxdb/telegraf-ci:1.13.8 - < scripts/ci-1.13.docker
|
|
||||||
docker push quay.io/influxdb/telegraf-ci:1.13.8
|
|
||||||
|
|
||||||
.PHONY: ci-1.12
|
|
||||||
ci-1.12:
|
|
||||||
docker build -t quay.io/influxdb/telegraf-ci:1.12.17 - < scripts/ci-1.12.docker
|
|
||||||
docker push quay.io/influxdb/telegraf-ci:1.12.17
|
|
||||||
|
|
289
README.md
289
README.md
|
@ -1,24 +1,24 @@
|
||||||
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
|
# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
|
||||||
|
|
||||||
Telegraf is an agent for collecting, processing, aggregating, and writing metrics.
|
Telegraf is an agent written in Go for collecting, processing, aggregating,
|
||||||
|
and writing metrics.
|
||||||
|
|
||||||
Design goals are to have a minimal memory footprint with a plugin system so
|
Design goals are to have a minimal memory footprint with a plugin system so
|
||||||
that developers in the community can easily add support for collecting
|
that developers in the community can easily add support for collecting metrics
|
||||||
metrics.
|
from local or remote services.
|
||||||
|
|
||||||
Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
|
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
|
||||||
|
|
||||||
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
|
1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
|
||||||
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
|
2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
|
||||||
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
|
3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
|
||||||
4. [Output Plugins](#output-plugins) write metrics to various destinations
|
4. [Output Plugins](#output-plugins) write metrics to various destinations
|
||||||
|
|
||||||
New plugins are designed to be easy to contribute, pull requests are welcomed
|
For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md).
|
||||||
and we work to incorporate as many pull requests as possible.
|
|
||||||
|
|
||||||
## Try in Browser :rocket:
|
New plugins are designed to be easy to contribute,
|
||||||
|
we'll eagerly accept pull
|
||||||
You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/).
|
requests and will manage the set of plugins that Telegraf supports.
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
|
@ -26,19 +26,8 @@ There are many ways to contribute:
|
||||||
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
|
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
|
||||||
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
|
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
|
||||||
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
|
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
|
||||||
- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/)
|
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
|
||||||
- [Contribute plugins](CONTRIBUTING.md)
|
- [Contribute plugins](CONTRIBUTING.md)
|
||||||
- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)*
|
|
||||||
|
|
||||||
## Minimum Requirements
|
|
||||||
|
|
||||||
Telegraf shares the same [minimum requirements][] as Go:
|
|
||||||
- Linux kernel version 2.6.23 or later
|
|
||||||
- Windows 7 or later
|
|
||||||
- FreeBSD 11.2 or later
|
|
||||||
- MacOS 10.11 El Capitan or later
|
|
||||||
|
|
||||||
[minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements
|
|
||||||
|
|
||||||
## Installation:
|
## Installation:
|
||||||
|
|
||||||
|
@ -51,29 +40,20 @@ Ansible role: https://github.com/rossmcdonald/telegraf
|
||||||
|
|
||||||
### From Source:
|
### From Source:
|
||||||
|
|
||||||
Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make.
|
Telegraf requires golang version 1.8+, the Makefile requires GNU make.
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/doc/install) >=1.13 (1.14 recommended)
|
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
|
||||||
2. Clone the Telegraf repository:
|
which is installed by the Makefile if you don't have it already.
|
||||||
```
|
|
||||||
cd ~/src
|
|
||||||
git clone https://github.com/influxdata/telegraf.git
|
|
||||||
```
|
|
||||||
3. Run `make` from the source directory
|
|
||||||
```
|
|
||||||
cd ~/src/telegraf
|
|
||||||
make
|
|
||||||
```
|
|
||||||
|
|
||||||
### Changelog
|
1. [Install Go](https://golang.org/doc/install)
|
||||||
|
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
|
||||||
View the [changelog](/CHANGELOG.md) for the latest updates and changes by
|
3. Run `go get -d github.com/influxdata/telegraf`
|
||||||
version.
|
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
|
||||||
|
5. Run `make`
|
||||||
|
|
||||||
### Nightly Builds
|
### Nightly Builds
|
||||||
|
|
||||||
These builds are generated from the master branch:
|
These builds are generated from the master branch:
|
||||||
- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz)
|
|
||||||
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
|
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
|
||||||
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
|
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
|
||||||
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
|
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
|
||||||
|
@ -103,171 +83,105 @@ These builds are generated from the master branch:
|
||||||
See usage with:
|
See usage with:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf --help
|
./telegraf --help
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Generate a telegraf config file:
|
#### Generate a telegraf config file:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf config > telegraf.conf
|
./telegraf config > telegraf.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Generate config with only cpu input & influxdb output plugins defined:
|
#### Generate config with only cpu input & influxdb output plugins defined:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config
|
./telegraf --input-filter cpu --output-filter influxdb config
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Run a single telegraf collection, outputting metrics to stdout:
|
#### Run a single telegraf collection, outputing metrics to stdout:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf --config telegraf.conf --test
|
./telegraf --config telegraf.conf --test
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Run telegraf with all plugins defined in config file:
|
#### Run telegraf with all plugins defined in config file:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf --config telegraf.conf
|
./telegraf --config telegraf.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
|
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
|
||||||
|
|
||||||
```
|
```
|
||||||
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
||||||
```
|
```
|
||||||
|
|
||||||
## Documentation
|
|
||||||
|
|
||||||
[Latest Release Documentation][release docs].
|
## Configuration
|
||||||
|
|
||||||
For documentation on the latest development code see the [documentation index][devel docs].
|
See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced
|
||||||
|
configuration options.
|
||||||
[release docs]: https://docs.influxdata.com/telegraf
|
|
||||||
[devel docs]: docs
|
|
||||||
|
|
||||||
## Input Plugins
|
## Input Plugins
|
||||||
|
|
||||||
* [activemq](./plugins/inputs/activemq)
|
|
||||||
* [aerospike](./plugins/inputs/aerospike)
|
* [aerospike](./plugins/inputs/aerospike)
|
||||||
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
|
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
|
||||||
* [apache](./plugins/inputs/apache)
|
* [apache](./plugins/inputs/apache)
|
||||||
* [apcupsd](./plugins/inputs/apcupsd)
|
* [aws cloudwatch](./plugins/inputs/cloudwatch)
|
||||||
* [aurora](./plugins/inputs/aurora)
|
|
||||||
* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch)
|
|
||||||
* [azure_storage_queue](./plugins/inputs/azure_storage_queue)
|
|
||||||
* [bcache](./plugins/inputs/bcache)
|
* [bcache](./plugins/inputs/bcache)
|
||||||
* [beanstalkd](./plugins/inputs/beanstalkd)
|
|
||||||
* [bind](./plugins/inputs/bind)
|
|
||||||
* [bond](./plugins/inputs/bond)
|
* [bond](./plugins/inputs/bond)
|
||||||
* [burrow](./plugins/inputs/burrow)
|
* [cassandra](./plugins/inputs/cassandra)
|
||||||
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
|
|
||||||
* [ceph](./plugins/inputs/ceph)
|
* [ceph](./plugins/inputs/ceph)
|
||||||
* [cgroup](./plugins/inputs/cgroup)
|
* [cgroup](./plugins/inputs/cgroup)
|
||||||
* [chrony](./plugins/inputs/chrony)
|
* [chrony](./plugins/inputs/chrony)
|
||||||
* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi)
|
|
||||||
* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt)
|
|
||||||
* [clickhouse](./plugins/inputs/clickhouse)
|
|
||||||
* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub
|
|
||||||
* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint
|
|
||||||
* [conntrack](./plugins/inputs/conntrack)
|
|
||||||
* [consul](./plugins/inputs/consul)
|
* [consul](./plugins/inputs/consul)
|
||||||
|
* [conntrack](./plugins/inputs/conntrack)
|
||||||
* [couchbase](./plugins/inputs/couchbase)
|
* [couchbase](./plugins/inputs/couchbase)
|
||||||
* [couchdb](./plugins/inputs/couchdb)
|
* [couchdb](./plugins/inputs/couchdb)
|
||||||
* [cpu](./plugins/inputs/cpu)
|
|
||||||
* [DC/OS](./plugins/inputs/dcos)
|
* [DC/OS](./plugins/inputs/dcos)
|
||||||
* [diskio](./plugins/inputs/diskio)
|
|
||||||
* [disk](./plugins/inputs/disk)
|
|
||||||
* [disque](./plugins/inputs/disque)
|
* [disque](./plugins/inputs/disque)
|
||||||
* [dmcache](./plugins/inputs/dmcache)
|
* [dmcache](./plugins/inputs/dmcache)
|
||||||
* [dns query time](./plugins/inputs/dns_query)
|
* [dns query time](./plugins/inputs/dns_query)
|
||||||
* [docker](./plugins/inputs/docker)
|
* [docker](./plugins/inputs/docker)
|
||||||
* [docker_log](./plugins/inputs/docker_log)
|
|
||||||
* [dovecot](./plugins/inputs/dovecot)
|
* [dovecot](./plugins/inputs/dovecot)
|
||||||
* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate)
|
|
||||||
* [elasticsearch](./plugins/inputs/elasticsearch)
|
* [elasticsearch](./plugins/inputs/elasticsearch)
|
||||||
* [ethtool](./plugins/inputs/ethtool)
|
|
||||||
* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub)
|
|
||||||
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios)
|
||||||
* [execd](./plugins/inputs/execd)
|
|
||||||
* [fail2ban](./plugins/inputs/fail2ban)
|
* [fail2ban](./plugins/inputs/fail2ban)
|
||||||
* [fibaro](./plugins/inputs/fibaro)
|
|
||||||
* [file](./plugins/inputs/file)
|
|
||||||
* [filestat](./plugins/inputs/filestat)
|
* [filestat](./plugins/inputs/filestat)
|
||||||
* [filecount](./plugins/inputs/filecount)
|
|
||||||
* [fireboard](/plugins/inputs/fireboard)
|
|
||||||
* [fluentd](./plugins/inputs/fluentd)
|
* [fluentd](./plugins/inputs/fluentd)
|
||||||
* [github](./plugins/inputs/github)
|
|
||||||
* [graylog](./plugins/inputs/graylog)
|
* [graylog](./plugins/inputs/graylog)
|
||||||
* [haproxy](./plugins/inputs/haproxy)
|
* [haproxy](./plugins/inputs/haproxy)
|
||||||
* [hddtemp](./plugins/inputs/hddtemp)
|
* [hddtemp](./plugins/inputs/hddtemp)
|
||||||
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
|
||||||
* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener))
|
|
||||||
* [http_listener_v2](./plugins/inputs/http_listener_v2)
|
|
||||||
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
|
|
||||||
* [http_response](./plugins/inputs/http_response)
|
* [http_response](./plugins/inputs/http_response)
|
||||||
* [icinga2](./plugins/inputs/icinga2)
|
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
|
||||||
* [infiniband](./plugins/inputs/infiniband)
|
|
||||||
* [influxdb](./plugins/inputs/influxdb)
|
|
||||||
* [influxdb_listener](./plugins/inputs/influxdb_listener)
|
|
||||||
* [internal](./plugins/inputs/internal)
|
* [internal](./plugins/inputs/internal)
|
||||||
|
* [influxdb](./plugins/inputs/influxdb)
|
||||||
* [interrupts](./plugins/inputs/interrupts)
|
* [interrupts](./plugins/inputs/interrupts)
|
||||||
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
|
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
|
||||||
* [ipset](./plugins/inputs/ipset)
|
|
||||||
* [iptables](./plugins/inputs/iptables)
|
* [iptables](./plugins/inputs/iptables)
|
||||||
* [ipvs](./plugins/inputs/ipvs)
|
* [ipset](./plugins/inputs/ipset)
|
||||||
* [jenkins](./plugins/inputs/jenkins)
|
|
||||||
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
|
|
||||||
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
|
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
|
||||||
* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry)
|
* [jolokia2](./plugins/inputs/jolokia2)
|
||||||
* [kafka_consumer](./plugins/inputs/kafka_consumer)
|
|
||||||
* [kapacitor](./plugins/inputs/kapacitor)
|
* [kapacitor](./plugins/inputs/kapacitor)
|
||||||
* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis)
|
|
||||||
* [kernel](./plugins/inputs/kernel)
|
|
||||||
* [kernel_vmstat](./plugins/inputs/kernel_vmstat)
|
|
||||||
* [kibana](./plugins/inputs/kibana)
|
|
||||||
* [kubernetes](./plugins/inputs/kubernetes)
|
* [kubernetes](./plugins/inputs/kubernetes)
|
||||||
* [kube_inventory](./plugins/inputs/kube_inventory)
|
|
||||||
* [lanz](./plugins/inputs/lanz)
|
|
||||||
* [leofs](./plugins/inputs/leofs)
|
* [leofs](./plugins/inputs/leofs)
|
||||||
* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs)
|
|
||||||
* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail))
|
|
||||||
* [logstash](./plugins/inputs/logstash)
|
|
||||||
* [lustre2](./plugins/inputs/lustre2)
|
* [lustre2](./plugins/inputs/lustre2)
|
||||||
* [mailchimp](./plugins/inputs/mailchimp)
|
* [mailchimp](./plugins/inputs/mailchimp)
|
||||||
* [marklogic](./plugins/inputs/marklogic)
|
|
||||||
* [mcrouter](./plugins/inputs/mcrouter)
|
|
||||||
* [memcached](./plugins/inputs/memcached)
|
* [memcached](./plugins/inputs/memcached)
|
||||||
* [mem](./plugins/inputs/mem)
|
|
||||||
* [mesos](./plugins/inputs/mesos)
|
* [mesos](./plugins/inputs/mesos)
|
||||||
* [minecraft](./plugins/inputs/minecraft)
|
* [minecraft](./plugins/inputs/minecraft)
|
||||||
* [modbus](./plugins/inputs/modbus)
|
|
||||||
* [mongodb](./plugins/inputs/mongodb)
|
* [mongodb](./plugins/inputs/mongodb)
|
||||||
* [monit](./plugins/inputs/monit)
|
|
||||||
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
|
|
||||||
* [multifile](./plugins/inputs/multifile)
|
|
||||||
* [mysql](./plugins/inputs/mysql)
|
* [mysql](./plugins/inputs/mysql)
|
||||||
* [nats_consumer](./plugins/inputs/nats_consumer)
|
|
||||||
* [nats](./plugins/inputs/nats)
|
* [nats](./plugins/inputs/nats)
|
||||||
* [neptune_apex](./plugins/inputs/neptune_apex)
|
|
||||||
* [net](./plugins/inputs/net)
|
|
||||||
* [net_response](./plugins/inputs/net_response)
|
* [net_response](./plugins/inputs/net_response)
|
||||||
* [netstat](./plugins/inputs/net)
|
|
||||||
* [nginx](./plugins/inputs/nginx)
|
* [nginx](./plugins/inputs/nginx)
|
||||||
* [nginx_plus_api](./plugins/inputs/nginx_plus_api)
|
|
||||||
* [nginx_plus](./plugins/inputs/nginx_plus)
|
* [nginx_plus](./plugins/inputs/nginx_plus)
|
||||||
* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check)
|
|
||||||
* [nginx_vts](./plugins/inputs/nginx_vts)
|
|
||||||
* [nsq_consumer](./plugins/inputs/nsq_consumer)
|
|
||||||
* [nsq](./plugins/inputs/nsq)
|
* [nsq](./plugins/inputs/nsq)
|
||||||
* [nstat](./plugins/inputs/nstat)
|
* [nstat](./plugins/inputs/nstat)
|
||||||
* [ntpq](./plugins/inputs/ntpq)
|
* [ntpq](./plugins/inputs/ntpq)
|
||||||
* [nvidia_smi](./plugins/inputs/nvidia_smi)
|
|
||||||
* [openldap](./plugins/inputs/openldap)
|
* [openldap](./plugins/inputs/openldap)
|
||||||
* [openntpd](./plugins/inputs/openntpd)
|
|
||||||
* [opensmtpd](./plugins/inputs/opensmtpd)
|
* [opensmtpd](./plugins/inputs/opensmtpd)
|
||||||
* [openweathermap](./plugins/inputs/openweathermap)
|
|
||||||
* [pf](./plugins/inputs/pf)
|
* [pf](./plugins/inputs/pf)
|
||||||
* [pgbouncer](./plugins/inputs/pgbouncer)
|
|
||||||
* [phpfpm](./plugins/inputs/phpfpm)
|
* [phpfpm](./plugins/inputs/phpfpm)
|
||||||
* [phusion passenger](./plugins/inputs/passenger)
|
* [phusion passenger](./plugins/inputs/passenger)
|
||||||
* [ping](./plugins/inputs/ping)
|
* [ping](./plugins/inputs/ping)
|
||||||
|
@ -275,8 +189,6 @@ For documentation on the latest development code see the [documentation index][d
|
||||||
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
|
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
|
||||||
* [postgresql](./plugins/inputs/postgresql)
|
* [postgresql](./plugins/inputs/postgresql)
|
||||||
* [powerdns](./plugins/inputs/powerdns)
|
* [powerdns](./plugins/inputs/powerdns)
|
||||||
* [powerdns_recursor](./plugins/inputs/powerdns_recursor)
|
|
||||||
* [processes](./plugins/inputs/processes)
|
|
||||||
* [procstat](./plugins/inputs/procstat)
|
* [procstat](./plugins/inputs/procstat)
|
||||||
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
|
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
|
||||||
* [puppetagent](./plugins/inputs/puppetagent)
|
* [puppetagent](./plugins/inputs/puppetagent)
|
||||||
|
@ -287,35 +199,47 @@ For documentation on the latest development code see the [documentation index][d
|
||||||
* [riak](./plugins/inputs/riak)
|
* [riak](./plugins/inputs/riak)
|
||||||
* [salesforce](./plugins/inputs/salesforce)
|
* [salesforce](./plugins/inputs/salesforce)
|
||||||
* [sensors](./plugins/inputs/sensors)
|
* [sensors](./plugins/inputs/sensors)
|
||||||
* [sflow](./plugins/inputs/sflow)
|
|
||||||
* [smart](./plugins/inputs/smart)
|
* [smart](./plugins/inputs/smart)
|
||||||
* [snmp_legacy](./plugins/inputs/snmp_legacy)
|
|
||||||
* [snmp](./plugins/inputs/snmp)
|
* [snmp](./plugins/inputs/snmp)
|
||||||
* [snmp_trap](./plugins/inputs/snmp_trap)
|
* [snmp_legacy](./plugins/inputs/snmp_legacy)
|
||||||
* [socket_listener](./plugins/inputs/socket_listener)
|
|
||||||
* [solr](./plugins/inputs/solr)
|
* [solr](./plugins/inputs/solr)
|
||||||
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
* [sql server](./plugins/inputs/sqlserver) (microsoft)
|
||||||
* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring)
|
|
||||||
* [statsd](./plugins/inputs/statsd)
|
|
||||||
* [suricata](./plugins/inputs/suricata)
|
|
||||||
* [swap](./plugins/inputs/swap)
|
|
||||||
* [synproxy](./plugins/inputs/synproxy)
|
|
||||||
* [syslog](./plugins/inputs/syslog)
|
|
||||||
* [sysstat](./plugins/inputs/sysstat)
|
|
||||||
* [systemd_units](./plugins/inputs/systemd_units)
|
|
||||||
* [system](./plugins/inputs/system)
|
|
||||||
* [tail](./plugins/inputs/tail)
|
|
||||||
* [temp](./plugins/inputs/temp)
|
|
||||||
* [tcp_listener](./plugins/inputs/socket_listener)
|
|
||||||
* [teamspeak](./plugins/inputs/teamspeak)
|
* [teamspeak](./plugins/inputs/teamspeak)
|
||||||
* [tengine](./plugins/inputs/tengine)
|
|
||||||
* [tomcat](./plugins/inputs/tomcat)
|
* [tomcat](./plugins/inputs/tomcat)
|
||||||
* [twemproxy](./plugins/inputs/twemproxy)
|
* [twemproxy](./plugins/inputs/twemproxy)
|
||||||
* [udp_listener](./plugins/inputs/socket_listener)
|
* [unbound](./plugins/input/unbound)
|
||||||
* [unbound](./plugins/inputs/unbound)
|
|
||||||
* [uwsgi](./plugins/inputs/uwsgi)
|
|
||||||
* [varnish](./plugins/inputs/varnish)
|
* [varnish](./plugins/inputs/varnish)
|
||||||
* [vsphere](./plugins/inputs/vsphere) VMware vSphere
|
* [zfs](./plugins/inputs/zfs)
|
||||||
|
* [zookeeper](./plugins/inputs/zookeeper)
|
||||||
|
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
|
||||||
|
* [win_services](./plugins/inputs/win_services)
|
||||||
|
* [sysstat](./plugins/inputs/sysstat)
|
||||||
|
* [system](./plugins/inputs/system)
|
||||||
|
* cpu
|
||||||
|
* mem
|
||||||
|
* net
|
||||||
|
* netstat
|
||||||
|
* disk
|
||||||
|
* diskio
|
||||||
|
* swap
|
||||||
|
* processes
|
||||||
|
* kernel (/proc/stat)
|
||||||
|
* kernel (/proc/vmstat)
|
||||||
|
* linux_sysctl_fs (/proc/sys/fs)
|
||||||
|
|
||||||
|
Telegraf can also collect metrics via the following service plugins:
|
||||||
|
|
||||||
|
* [http_listener](./plugins/inputs/http_listener)
|
||||||
|
* [kafka_consumer](./plugins/inputs/kafka_consumer)
|
||||||
|
* [mqtt_consumer](./plugins/inputs/mqtt_consumer)
|
||||||
|
* [nats_consumer](./plugins/inputs/nats_consumer)
|
||||||
|
* [nsq_consumer](./plugins/inputs/nsq_consumer)
|
||||||
|
* [logparser](./plugins/inputs/logparser)
|
||||||
|
* [statsd](./plugins/inputs/statsd)
|
||||||
|
* [socket_listener](./plugins/inputs/socket_listener)
|
||||||
|
* [tail](./plugins/inputs/tail)
|
||||||
|
* [tcp_listener](./plugins/inputs/socket_listener)
|
||||||
|
* [udp_listener](./plugins/inputs/socket_listener)
|
||||||
* [webhooks](./plugins/inputs/webhooks)
|
* [webhooks](./plugins/inputs/webhooks)
|
||||||
* [filestack](./plugins/inputs/webhooks/filestack)
|
* [filestack](./plugins/inputs/webhooks/filestack)
|
||||||
* [github](./plugins/inputs/webhooks/github)
|
* [github](./plugins/inputs/webhooks/github)
|
||||||
|
@ -323,107 +247,54 @@ For documentation on the latest development code see the [documentation index][d
|
||||||
* [papertrail](./plugins/inputs/webhooks/papertrail)
|
* [papertrail](./plugins/inputs/webhooks/papertrail)
|
||||||
* [particle](./plugins/inputs/webhooks/particle)
|
* [particle](./plugins/inputs/webhooks/particle)
|
||||||
* [rollbar](./plugins/inputs/webhooks/rollbar)
|
* [rollbar](./plugins/inputs/webhooks/rollbar)
|
||||||
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
|
|
||||||
* [win_services](./plugins/inputs/win_services)
|
|
||||||
* [wireguard](./plugins/inputs/wireguard)
|
|
||||||
* [wireless](./plugins/inputs/wireless)
|
|
||||||
* [x509_cert](./plugins/inputs/x509_cert)
|
|
||||||
* [zfs](./plugins/inputs/zfs)
|
|
||||||
* [zipkin](./plugins/inputs/zipkin)
|
* [zipkin](./plugins/inputs/zipkin)
|
||||||
* [zookeeper](./plugins/inputs/zookeeper)
|
|
||||||
|
|
||||||
## Parsers
|
Telegraf is able to parse the following input data formats into metrics, these
|
||||||
|
formats may be used with input plugins supporting the `data_format` option:
|
||||||
|
|
||||||
- [InfluxDB Line Protocol](/plugins/parsers/influx)
|
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
|
||||||
- [Collectd](/plugins/parsers/collectd)
|
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
|
||||||
- [CSV](/plugins/parsers/csv)
|
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
|
||||||
- [Dropwizard](/plugins/parsers/dropwizard)
|
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
|
||||||
- [FormUrlencoded](/plugins/parser/form_urlencoded)
|
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
|
||||||
- [Graphite](/plugins/parsers/graphite)
|
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
|
||||||
- [Grok](/plugins/parsers/grok)
|
* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)
|
||||||
- [JSON](/plugins/parsers/json)
|
|
||||||
- [Logfmt](/plugins/parsers/logfmt)
|
|
||||||
- [Nagios](/plugins/parsers/nagios)
|
|
||||||
- [Value](/plugins/parsers/value), ie: 45 or "booyah"
|
|
||||||
- [Wavefront](/plugins/parsers/wavefront)
|
|
||||||
|
|
||||||
## Serializers
|
|
||||||
|
|
||||||
- [InfluxDB Line Protocol](/plugins/serializers/influx)
|
|
||||||
- [JSON](/plugins/serializers/json)
|
|
||||||
- [Graphite](/plugins/serializers/graphite)
|
|
||||||
- [ServiceNow](/plugins/serializers/nowmetric)
|
|
||||||
- [SplunkMetric](/plugins/serializers/splunkmetric)
|
|
||||||
- [Carbon2](/plugins/serializers/carbon2)
|
|
||||||
- [Wavefront](/plugins/serializers/wavefront)
|
|
||||||
|
|
||||||
## Processor Plugins
|
## Processor Plugins
|
||||||
|
|
||||||
* [clone](/plugins/processors/clone)
|
* [printer](./plugins/processors/printer)
|
||||||
* [converter](/plugins/processors/converter)
|
|
||||||
* [date](/plugins/processors/date)
|
|
||||||
* [dedup](/plugins/processors/dedup)
|
|
||||||
* [defaults](/plugins/processors/defaults)
|
|
||||||
* [enum](/plugins/processors/enum)
|
|
||||||
* [filepath](/plugins/processors/filepath)
|
|
||||||
* [override](/plugins/processors/override)
|
|
||||||
* [parser](/plugins/processors/parser)
|
|
||||||
* [pivot](/plugins/processors/pivot)
|
|
||||||
* [printer](/plugins/processors/printer)
|
|
||||||
* [regex](/plugins/processors/regex)
|
|
||||||
* [rename](/plugins/processors/rename)
|
|
||||||
* [s2geo](/plugins/processors/s2geo)
|
|
||||||
* [strings](/plugins/processors/strings)
|
|
||||||
* [tag_limit](/plugins/processors/tag_limit)
|
|
||||||
* [template](/plugins/processors/template)
|
|
||||||
* [topk](/plugins/processors/topk)
|
|
||||||
* [unpivot](/plugins/processors/unpivot)
|
|
||||||
|
|
||||||
## Aggregator Plugins
|
## Aggregator Plugins
|
||||||
|
|
||||||
* [basicstats](./plugins/aggregators/basicstats)
|
* [basicstats](./plugins/aggregators/basicstats)
|
||||||
* [final](./plugins/aggregators/final)
|
|
||||||
* [histogram](./plugins/aggregators/histogram)
|
|
||||||
* [merge](./plugins/aggregators/merge)
|
|
||||||
* [minmax](./plugins/aggregators/minmax)
|
* [minmax](./plugins/aggregators/minmax)
|
||||||
* [valuecounter](./plugins/aggregators/valuecounter)
|
* [histogram](./plugins/aggregators/histogram)
|
||||||
|
|
||||||
## Output Plugins
|
## Output Plugins
|
||||||
|
|
||||||
* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x)
|
* [influxdb](./plugins/outputs/influxdb)
|
||||||
* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb))
|
|
||||||
* [amon](./plugins/outputs/amon)
|
* [amon](./plugins/outputs/amon)
|
||||||
* [amqp](./plugins/outputs/amqp) (rabbitmq)
|
* [amqp](./plugins/outputs/amqp) (rabbitmq)
|
||||||
* [application_insights](./plugins/outputs/application_insights)
|
|
||||||
* [aws kinesis](./plugins/outputs/kinesis)
|
* [aws kinesis](./plugins/outputs/kinesis)
|
||||||
* [aws cloudwatch](./plugins/outputs/cloudwatch)
|
* [aws cloudwatch](./plugins/outputs/cloudwatch)
|
||||||
* [azure_monitor](./plugins/outputs/azure_monitor)
|
|
||||||
* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub
|
|
||||||
* [cratedb](./plugins/outputs/cratedb)
|
* [cratedb](./plugins/outputs/cratedb)
|
||||||
* [datadog](./plugins/outputs/datadog)
|
* [datadog](./plugins/outputs/datadog)
|
||||||
* [discard](./plugins/outputs/discard)
|
* [discard](./plugins/outputs/discard)
|
||||||
* [elasticsearch](./plugins/outputs/elasticsearch)
|
* [elasticsearch](./plugins/outputs/elasticsearch)
|
||||||
* [exec](./plugins/outputs/exec)
|
|
||||||
* [file](./plugins/outputs/file)
|
* [file](./plugins/outputs/file)
|
||||||
* [graphite](./plugins/outputs/graphite)
|
* [graphite](./plugins/outputs/graphite)
|
||||||
* [graylog](./plugins/outputs/graylog)
|
* [graylog](./plugins/outputs/graylog)
|
||||||
* [health](./plugins/outputs/health)
|
|
||||||
* [http](./plugins/outputs/http)
|
|
||||||
* [instrumental](./plugins/outputs/instrumental)
|
* [instrumental](./plugins/outputs/instrumental)
|
||||||
* [kafka](./plugins/outputs/kafka)
|
* [kafka](./plugins/outputs/kafka)
|
||||||
* [librato](./plugins/outputs/librato)
|
* [librato](./plugins/outputs/librato)
|
||||||
* [mqtt](./plugins/outputs/mqtt)
|
* [mqtt](./plugins/outputs/mqtt)
|
||||||
* [nats](./plugins/outputs/nats)
|
* [nats](./plugins/outputs/nats)
|
||||||
* [newrelic](./plugins/outputs/newrelic)
|
|
||||||
* [nsq](./plugins/outputs/nsq)
|
* [nsq](./plugins/outputs/nsq)
|
||||||
* [opentsdb](./plugins/outputs/opentsdb)
|
* [opentsdb](./plugins/outputs/opentsdb)
|
||||||
* [prometheus](./plugins/outputs/prometheus_client)
|
* [prometheus](./plugins/outputs/prometheus_client)
|
||||||
* [riemann](./plugins/outputs/riemann)
|
* [riemann](./plugins/outputs/riemann)
|
||||||
* [riemann_legacy](./plugins/outputs/riemann_legacy)
|
* [riemann_legacy](./plugins/outputs/riemann_legacy)
|
||||||
* [socket_writer](./plugins/outputs/socket_writer)
|
* [socket_writer](./plugins/outputs/socket_writer)
|
||||||
* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring)
|
|
||||||
* [syslog](./plugins/outputs/syslog)
|
|
||||||
* [tcp](./plugins/outputs/socket_writer)
|
* [tcp](./plugins/outputs/socket_writer)
|
||||||
* [udp](./plugins/outputs/socket_writer)
|
* [udp](./plugins/outputs/socket_writer)
|
||||||
* [warp10](./plugins/outputs/warp10)
|
|
||||||
* [wavefront](./plugins/outputs/wavefront)
|
* [wavefront](./plugins/outputs/wavefront)
|
||||||
|
|
|
@ -1,14 +1,16 @@
|
||||||
package telegraf
|
package telegraf
|
||||||
|
|
||||||
import (
|
import "time"
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Accumulator allows adding metrics to the processing flow.
|
// Accumulator is an interface for "accumulating" metrics from plugin(s).
|
||||||
|
// The metrics are sent down a channel shared between all plugins.
|
||||||
type Accumulator interface {
|
type Accumulator interface {
|
||||||
// AddFields adds a metric to the accumulator with the given measurement
|
// AddFields adds a metric to the accumulator with the given measurement
|
||||||
// name, fields, and tags (and timestamp). If a timestamp is not provided,
|
// name, fields, and tags (and timestamp). If a timestamp is not provided,
|
||||||
// then the accumulator sets it to "now".
|
// then the accumulator sets it to "now".
|
||||||
|
// Create a point with a value, decorating it with tags
|
||||||
|
// NOTE: tags is expected to be owned by the caller, don't mutate
|
||||||
|
// it after passing to Add.
|
||||||
AddFields(measurement string,
|
AddFields(measurement string,
|
||||||
fields map[string]interface{},
|
fields map[string]interface{},
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
|
@ -38,48 +40,7 @@ type Accumulator interface {
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time)
|
t ...time.Time)
|
||||||
|
|
||||||
// AddMetric adds an metric to the accumulator.
|
SetPrecision(precision, interval time.Duration)
|
||||||
AddMetric(Metric)
|
|
||||||
|
|
||||||
// SetPrecision sets the timestamp rounding precision. All metrics addeds
|
|
||||||
// added to the accumulator will have their timestamp rounded to the
|
|
||||||
// nearest multiple of precision.
|
|
||||||
SetPrecision(precision time.Duration)
|
|
||||||
|
|
||||||
// Report an error.
|
|
||||||
AddError(err error)
|
AddError(err error)
|
||||||
|
|
||||||
// Upgrade to a TrackingAccumulator with space for maxTracked
|
|
||||||
// metrics/batches.
|
|
||||||
WithTracking(maxTracked int) TrackingAccumulator
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrackingID uniquely identifies a tracked metric group
|
|
||||||
type TrackingID uint64
|
|
||||||
|
|
||||||
// DeliveryInfo provides the results of a delivered metric group.
|
|
||||||
type DeliveryInfo interface {
|
|
||||||
// ID is the TrackingID
|
|
||||||
ID() TrackingID
|
|
||||||
|
|
||||||
// Delivered returns true if the metric was processed successfully.
|
|
||||||
Delivered() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrackingAccumulator is an Accumulator that provides a signal when the
|
|
||||||
// metric has been fully processed. Sending more metrics than the accumulator
|
|
||||||
// has been allocated for without reading status from the Accepted or Rejected
|
|
||||||
// channels is an error.
|
|
||||||
type TrackingAccumulator interface {
|
|
||||||
Accumulator
|
|
||||||
|
|
||||||
// Add the Metric and arrange for tracking feedback after processing..
|
|
||||||
AddTrackingMetric(m Metric) TrackingID
|
|
||||||
|
|
||||||
// Add a group of Metrics and arrange for a signal when the group has been
|
|
||||||
// processed.
|
|
||||||
AddTrackingMetricGroup(group []Metric) TrackingID
|
|
||||||
|
|
||||||
// Delivered returns a channel that will contain the tracking results.
|
|
||||||
Delivered() <-chan DeliveryInfo
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,27 +1,31 @@
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"log"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/metric"
|
"github.com/influxdata/telegraf/selfstat"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
NErrors = selfstat.Register("agent", "gather_errors", map[string]string{})
|
||||||
)
|
)
|
||||||
|
|
||||||
type MetricMaker interface {
|
type MetricMaker interface {
|
||||||
LogName() string
|
Name() string
|
||||||
MakeMetric(metric telegraf.Metric) telegraf.Metric
|
MakeMetric(
|
||||||
Log() telegraf.Logger
|
measurement string,
|
||||||
}
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
type accumulator struct {
|
mType telegraf.ValueType,
|
||||||
maker MetricMaker
|
t time.Time,
|
||||||
metrics chan<- telegraf.Metric
|
) telegraf.Metric
|
||||||
precision time.Duration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewAccumulator(
|
func NewAccumulator(
|
||||||
maker MetricMaker,
|
maker MetricMaker,
|
||||||
metrics chan<- telegraf.Metric,
|
metrics chan telegraf.Metric,
|
||||||
) telegraf.Accumulator {
|
) telegraf.Accumulator {
|
||||||
acc := accumulator{
|
acc := accumulator{
|
||||||
maker: maker,
|
maker: maker,
|
||||||
|
@ -31,13 +35,23 @@ func NewAccumulator(
|
||||||
return &acc
|
return &acc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type accumulator struct {
|
||||||
|
metrics chan telegraf.Metric
|
||||||
|
|
||||||
|
maker MetricMaker
|
||||||
|
|
||||||
|
precision time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
func (ac *accumulator) AddFields(
|
func (ac *accumulator) AddFields(
|
||||||
measurement string,
|
measurement string,
|
||||||
fields map[string]interface{},
|
fields map[string]interface{},
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time,
|
t ...time.Time,
|
||||||
) {
|
) {
|
||||||
ac.addFields(measurement, tags, fields, telegraf.Untyped, t...)
|
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil {
|
||||||
|
ac.metrics <- m
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) AddGauge(
|
func (ac *accumulator) AddGauge(
|
||||||
|
@ -46,7 +60,9 @@ func (ac *accumulator) AddGauge(
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time,
|
t ...time.Time,
|
||||||
) {
|
) {
|
||||||
ac.addFields(measurement, tags, fields, telegraf.Gauge, t...)
|
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil {
|
||||||
|
ac.metrics <- m
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) AddCounter(
|
func (ac *accumulator) AddCounter(
|
||||||
|
@ -55,7 +71,9 @@ func (ac *accumulator) AddCounter(
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time,
|
t ...time.Time,
|
||||||
) {
|
) {
|
||||||
ac.addFields(measurement, tags, fields, telegraf.Counter, t...)
|
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil {
|
||||||
|
ac.metrics <- m
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) AddSummary(
|
func (ac *accumulator) AddSummary(
|
||||||
|
@ -64,7 +82,9 @@ func (ac *accumulator) AddSummary(
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time,
|
t ...time.Time,
|
||||||
) {
|
) {
|
||||||
ac.addFields(measurement, tags, fields, telegraf.Summary, t...)
|
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
|
||||||
|
ac.metrics <- m
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) AddHistogram(
|
func (ac *accumulator) AddHistogram(
|
||||||
|
@ -73,28 +93,7 @@ func (ac *accumulator) AddHistogram(
|
||||||
tags map[string]string,
|
tags map[string]string,
|
||||||
t ...time.Time,
|
t ...time.Time,
|
||||||
) {
|
) {
|
||||||
ac.addFields(measurement, tags, fields, telegraf.Histogram, t...)
|
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *accumulator) AddMetric(m telegraf.Metric) {
|
|
||||||
m.SetTime(m.Time().Round(ac.precision))
|
|
||||||
if m := ac.maker.MakeMetric(m); m != nil {
|
|
||||||
ac.metrics <- m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ac *accumulator) addFields(
|
|
||||||
measurement string,
|
|
||||||
tags map[string]string,
|
|
||||||
fields map[string]interface{},
|
|
||||||
tp telegraf.ValueType,
|
|
||||||
t ...time.Time,
|
|
||||||
) {
|
|
||||||
m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if m := ac.maker.MakeMetric(m); m != nil {
|
|
||||||
ac.metrics <- m
|
ac.metrics <- m
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -105,14 +104,33 @@ func (ac *accumulator) AddError(err error) {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ac.maker.Log().Errorf("Error in plugin: %v", err)
|
NErrors.Incr(1)
|
||||||
|
//TODO suppress/throttle consecutive duplicate errors?
|
||||||
|
log.Printf("E! Error in plugin [%s]: %s", ac.maker.Name(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) SetPrecision(precision time.Duration) {
|
// SetPrecision takes two time.Duration objects. If the first is non-zero,
|
||||||
|
// it sets that as the precision. Otherwise, it takes the second argument
|
||||||
|
// as the order of time that the metrics should be rounded to, with the
|
||||||
|
// maximum being 1s.
|
||||||
|
func (ac *accumulator) SetPrecision(precision, interval time.Duration) {
|
||||||
|
if precision > 0 {
|
||||||
ac.precision = precision
|
ac.precision = precision
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case interval >= time.Second:
|
||||||
|
ac.precision = time.Second
|
||||||
|
case interval >= time.Millisecond:
|
||||||
|
ac.precision = time.Millisecond
|
||||||
|
case interval >= time.Microsecond:
|
||||||
|
ac.precision = time.Microsecond
|
||||||
|
default:
|
||||||
|
ac.precision = time.Nanosecond
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) getTime(t []time.Time) time.Time {
|
func (ac accumulator) getTime(t []time.Time) time.Time {
|
||||||
var timestamp time.Time
|
var timestamp time.Time
|
||||||
if len(t) > 0 {
|
if len(t) > 0 {
|
||||||
timestamp = t[0]
|
timestamp = t[0]
|
||||||
|
@ -121,43 +139,3 @@ func (ac *accumulator) getTime(t []time.Time) time.Time {
|
||||||
}
|
}
|
||||||
return timestamp.Round(ac.precision)
|
return timestamp.Round(ac.precision)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator {
|
|
||||||
return &trackingAccumulator{
|
|
||||||
Accumulator: ac,
|
|
||||||
delivered: make(chan telegraf.DeliveryInfo, maxTracked),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type trackingAccumulator struct {
|
|
||||||
telegraf.Accumulator
|
|
||||||
delivered chan telegraf.DeliveryInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *trackingAccumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID {
|
|
||||||
dm, id := metric.WithTracking(m, a.onDelivery)
|
|
||||||
a.AddMetric(dm)
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *trackingAccumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID {
|
|
||||||
db, id := metric.WithGroupTracking(group, a.onDelivery)
|
|
||||||
for _, m := range db {
|
|
||||||
a.AddMetric(m)
|
|
||||||
}
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *trackingAccumulator) Delivered() <-chan telegraf.DeliveryInfo {
|
|
||||||
return a.delivered
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *trackingAccumulator) onDelivery(info telegraf.DeliveryInfo) {
|
|
||||||
select {
|
|
||||||
case a.delivered <- info:
|
|
||||||
default:
|
|
||||||
// This is a programming error in the input. More items were sent for
|
|
||||||
// tracking than space requested.
|
|
||||||
panic("channel is full")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -9,41 +9,69 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/models"
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAddFields(t *testing.T) {
|
func TestAdd(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddFields(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
metrics := make(chan telegraf.Metric, 10)
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
defer close(metrics)
|
defer close(metrics)
|
||||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
tags := map[string]string{"foo": "bar"}
|
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"usage": float64(99),
|
"usage": float64(99),
|
||||||
}
|
}
|
||||||
now := time.Now()
|
a.AddFields("acctest", fields, map[string]string{})
|
||||||
a.AddCounter("acctest", fields, tags, now)
|
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
|
||||||
|
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
testm := <-metrics
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest usage=99")
|
||||||
|
|
||||||
require.Equal(t, "acctest", testm.Name())
|
testm = <-metrics
|
||||||
actual, ok := testm.GetField("usage")
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test usage=99")
|
||||||
|
|
||||||
require.True(t, ok)
|
testm = <-metrics
|
||||||
require.Equal(t, float64(99), actual)
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
actual, ok = testm.GetTag("foo")
|
fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
|
||||||
require.True(t, ok)
|
actual)
|
||||||
require.Equal(t, "bar", actual)
|
|
||||||
|
|
||||||
tm := testm.Time()
|
|
||||||
// okay if monotonic clock differs
|
|
||||||
require.True(t, now.Equal(tm))
|
|
||||||
|
|
||||||
tp := testm.Type()
|
|
||||||
require.Equal(t, telegraf.Counter, tp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAddError(t *testing.T) {
|
func TestAccAddError(t *testing.T) {
|
||||||
|
@ -60,6 +88,7 @@ func TestAccAddError(t *testing.T) {
|
||||||
a.AddError(fmt.Errorf("baz"))
|
a.AddError(fmt.Errorf("baz"))
|
||||||
|
|
||||||
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
|
errs := bytes.Split(errBuf.Bytes(), []byte{'\n'})
|
||||||
|
assert.EqualValues(t, int64(3), NErrors.Get())
|
||||||
require.Len(t, errs, 4) // 4 because of trailing newline
|
require.Len(t, errs, 4) // 4 because of trailing newline
|
||||||
assert.Contains(t, string(errs[0]), "TestPlugin")
|
assert.Contains(t, string(errs[0]), "TestPlugin")
|
||||||
assert.Contains(t, string(errs[0]), "foo")
|
assert.Contains(t, string(errs[0]), "foo")
|
||||||
|
@ -69,75 +98,215 @@ func TestAccAddError(t *testing.T) {
|
||||||
assert.Contains(t, string(errs[2]), "baz")
|
assert.Contains(t, string(errs[2]), "baz")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSetPrecision(t *testing.T) {
|
func TestAddNoIntervalWithPrecision(t *testing.T) {
|
||||||
tests := []struct {
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
name string
|
|
||||||
unset bool
|
|
||||||
precision time.Duration
|
|
||||||
timestamp time.Time
|
|
||||||
expected time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "default precision is nanosecond",
|
|
||||||
unset: true,
|
|
||||||
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
|
|
||||||
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "second interval",
|
|
||||||
precision: time.Second,
|
|
||||||
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
|
|
||||||
expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "microsecond interval",
|
|
||||||
precision: time.Microsecond,
|
|
||||||
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
|
|
||||||
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "2 second precision",
|
|
||||||
precision: 2 * time.Second,
|
|
||||||
timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
|
|
||||||
expected: time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
metrics := make(chan telegraf.Metric, 10)
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
if !tt.unset {
|
a.SetPrecision(0, time.Second)
|
||||||
a.SetPrecision(tt.precision)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.AddFields("acctest",
|
a.AddFields("acctest",
|
||||||
map[string]interface{}{"value": float64(101)},
|
map[string]interface{}{"value": float64(101)},
|
||||||
map[string]string{},
|
map[string]string{})
|
||||||
tt.timestamp,
|
a.AddFields("acctest",
|
||||||
)
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
testm := <-metrics
|
testm := <-metrics
|
||||||
require.Equal(t, tt.expected, testm.Time())
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
close(metrics)
|
testm = <-metrics
|
||||||
})
|
actual = testm.String()
|
||||||
}
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAddTrackingMetricGroupEmpty(t *testing.T) {
|
func TestAddDisablePrecision(t *testing.T) {
|
||||||
ch := make(chan telegraf.Metric, 10)
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
metrics := []telegraf.Metric{}
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1)
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
id := acc.AddTrackingMetricGroup(metrics)
|
a.SetPrecision(time.Nanosecond, 0)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
select {
|
testm := <-metrics
|
||||||
case tracking := <-acc.Delivered():
|
actual := testm.String()
|
||||||
require.Equal(t, tracking.ID(), id)
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
default:
|
|
||||||
t.Fatal("empty group should be delivered immediately")
|
testm = <-metrics
|
||||||
}
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddNoPrecisionWithInterval(t *testing.T) {
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDifferentPrecisions(t *testing.T) {
|
||||||
|
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
|
||||||
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Second)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Millisecond)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Microsecond)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
|
||||||
|
actual)
|
||||||
|
|
||||||
|
a.SetPrecision(0, time.Nanosecond)
|
||||||
|
a.AddFields("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
|
||||||
|
actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddGauge(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
|
a.AddGauge("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{})
|
||||||
|
a.AddGauge("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddGauge("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
|
||||||
|
actual)
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Gauge)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddCounter(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
metrics := make(chan telegraf.Metric, 10)
|
||||||
|
defer close(metrics)
|
||||||
|
a := NewAccumulator(&TestMetricMaker{}, metrics)
|
||||||
|
|
||||||
|
a.AddCounter("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{})
|
||||||
|
a.AddCounter("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"})
|
||||||
|
a.AddCounter("acctest",
|
||||||
|
map[string]interface{}{"value": float64(101)},
|
||||||
|
map[string]string{"acc": "test"}, now)
|
||||||
|
|
||||||
|
testm := <-metrics
|
||||||
|
actual := testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest value=101")
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Contains(t, actual, "acctest,acc=test value=101")
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||||
|
|
||||||
|
testm = <-metrics
|
||||||
|
actual = testm.String()
|
||||||
|
assert.Equal(t,
|
||||||
|
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
|
||||||
|
actual)
|
||||||
|
assert.Equal(t, testm.Type(), telegraf.Counter)
|
||||||
}
|
}
|
||||||
|
|
||||||
type TestMetricMaker struct {
|
type TestMetricMaker struct {
|
||||||
|
@ -146,15 +315,26 @@ type TestMetricMaker struct {
|
||||||
func (tm *TestMetricMaker) Name() string {
|
func (tm *TestMetricMaker) Name() string {
|
||||||
return "TestPlugin"
|
return "TestPlugin"
|
||||||
}
|
}
|
||||||
|
func (tm *TestMetricMaker) MakeMetric(
|
||||||
func (tm *TestMetricMaker) LogName() string {
|
measurement string,
|
||||||
return tm.Name()
|
fields map[string]interface{},
|
||||||
}
|
tags map[string]string,
|
||||||
|
mType telegraf.ValueType,
|
||||||
func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric {
|
t time.Time,
|
||||||
return metric
|
) telegraf.Metric {
|
||||||
}
|
switch mType {
|
||||||
|
case telegraf.Untyped:
|
||||||
func (tm *TestMetricMaker) Log() telegraf.Logger {
|
if m, err := metric.New(measurement, tags, fields, t); err == nil {
|
||||||
return models.NewLogger("TestPlugin", "test", "")
|
return m
|
||||||
|
}
|
||||||
|
case telegraf.Counter:
|
||||||
|
if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil {
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
case telegraf.Gauge:
|
||||||
|
if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil {
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
1427
agent/agent.go
1427
agent/agent.go
File diff suppressed because it is too large
Load Diff
|
@ -1,19 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
const flushSignal = syscall.SIGUSR1
|
|
||||||
|
|
||||||
func watchForFlushSignal(flushRequested chan os.Signal) {
|
|
||||||
signal.Notify(flushRequested, flushSignal)
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopListeningForFlushSignal(flushRequested chan os.Signal) {
|
|
||||||
defer signal.Stop(flushRequested)
|
|
||||||
}
|
|
|
@ -2,13 +2,15 @@ package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/config"
|
"github.com/influxdata/telegraf/internal/config"
|
||||||
|
|
||||||
|
// needing to load the plugins
|
||||||
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
_ "github.com/influxdata/telegraf/plugins/inputs/all"
|
||||||
|
// needing to load the outputs
|
||||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAgent_OmitHostname(t *testing.T) {
|
func TestAgent_OmitHostname(t *testing.T) {
|
||||||
|
@ -22,35 +24,35 @@ func TestAgent_OmitHostname(t *testing.T) {
|
||||||
func TestAgent_LoadPlugin(t *testing.T) {
|
func TestAgent_LoadPlugin(t *testing.T) {
|
||||||
c := config.NewConfig()
|
c := config.NewConfig()
|
||||||
c.InputFilters = []string{"mysql"}
|
c.InputFilters = []string{"mysql"}
|
||||||
err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ := NewAgent(c)
|
a, _ := NewAgent(c)
|
||||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.InputFilters = []string{"foo"}
|
c.InputFilters = []string{"foo"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 0, len(a.Config.Inputs))
|
assert.Equal(t, 0, len(a.Config.Inputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.InputFilters = []string{"mysql", "foo"}
|
c.InputFilters = []string{"mysql", "foo"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 1, len(a.Config.Inputs))
|
assert.Equal(t, 1, len(a.Config.Inputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.InputFilters = []string{"mysql", "redis"}
|
c.InputFilters = []string{"mysql", "redis"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 2, len(a.Config.Inputs))
|
assert.Equal(t, 2, len(a.Config.Inputs))
|
||||||
|
@ -59,42 +61,42 @@ func TestAgent_LoadPlugin(t *testing.T) {
|
||||||
func TestAgent_LoadOutput(t *testing.T) {
|
func TestAgent_LoadOutput(t *testing.T) {
|
||||||
c := config.NewConfig()
|
c := config.NewConfig()
|
||||||
c.OutputFilters = []string{"influxdb"}
|
c.OutputFilters = []string{"influxdb"}
|
||||||
err := c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ := NewAgent(c)
|
a, _ := NewAgent(c)
|
||||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{"kafka"}
|
c.OutputFilters = []string{"kafka"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 1, len(a.Config.Outputs))
|
assert.Equal(t, 1, len(a.Config.Outputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{}
|
c.OutputFilters = []string{}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{"foo"}
|
c.OutputFilters = []string{"foo"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 0, len(a.Config.Outputs))
|
assert.Equal(t, 0, len(a.Config.Outputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{"influxdb", "foo"}
|
c.OutputFilters = []string{"influxdb", "foo"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 2, len(a.Config.Outputs))
|
assert.Equal(t, 2, len(a.Config.Outputs))
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{"influxdb", "kafka"}
|
c.OutputFilters = []string{"influxdb", "kafka"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, 3, len(c.Outputs))
|
assert.Equal(t, 3, len(c.Outputs))
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
|
@ -102,67 +104,8 @@ func TestAgent_LoadOutput(t *testing.T) {
|
||||||
|
|
||||||
c = config.NewConfig()
|
c = config.NewConfig()
|
||||||
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"}
|
||||||
err = c.LoadConfig("../config/testdata/telegraf-agent.toml")
|
err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
a, _ = NewAgent(c)
|
a, _ = NewAgent(c)
|
||||||
assert.Equal(t, 3, len(a.Config.Outputs))
|
assert.Equal(t, 3, len(a.Config.Outputs))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWindow(t *testing.T) {
|
|
||||||
parse := func(s string) time.Time {
|
|
||||||
tm, err := time.Parse(time.RFC3339, s)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return tm
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
start time.Time
|
|
||||||
roundInterval bool
|
|
||||||
period time.Duration
|
|
||||||
since time.Time
|
|
||||||
until time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "round with exact alignment",
|
|
||||||
start: parse("2018-03-27T00:00:00Z"),
|
|
||||||
roundInterval: true,
|
|
||||||
period: 30 * time.Second,
|
|
||||||
since: parse("2018-03-27T00:00:00Z"),
|
|
||||||
until: parse("2018-03-27T00:00:30Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "round with alignment needed",
|
|
||||||
start: parse("2018-03-27T00:00:05Z"),
|
|
||||||
roundInterval: true,
|
|
||||||
period: 30 * time.Second,
|
|
||||||
since: parse("2018-03-27T00:00:00Z"),
|
|
||||||
until: parse("2018-03-27T00:00:30Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no round with exact alignment",
|
|
||||||
start: parse("2018-03-27T00:00:00Z"),
|
|
||||||
roundInterval: false,
|
|
||||||
period: 30 * time.Second,
|
|
||||||
since: parse("2018-03-27T00:00:00Z"),
|
|
||||||
until: parse("2018-03-27T00:00:30Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no found with alignment needed",
|
|
||||||
start: parse("2018-03-27T00:00:05Z"),
|
|
||||||
roundInterval: false,
|
|
||||||
period: 30 * time.Second,
|
|
||||||
since: parse("2018-03-27T00:00:05Z"),
|
|
||||||
until: parse("2018-03-27T00:00:35Z"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
since, until := updateWindow(tt.start, tt.roundInterval, tt.period)
|
|
||||||
require.Equal(t, tt.since, since, "since")
|
|
||||||
require.Equal(t, tt.until, until, "until")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,13 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package agent
|
|
||||||
|
|
||||||
import "os"
|
|
||||||
|
|
||||||
func watchForFlushSignal(flushRequested chan os.Signal) {
|
|
||||||
// not supported
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopListeningForFlushSignal(flushRequested chan os.Signal) {
|
|
||||||
// not supported
|
|
||||||
}
|
|
268
agent/tick.go
268
agent/tick.go
|
@ -1,268 +0,0 @@
|
||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/benbjohnson/clock"
|
|
||||||
"github.com/influxdata/telegraf/internal"
|
|
||||||
)
|
|
||||||
|
|
||||||
type empty struct{}
|
|
||||||
|
|
||||||
type Ticker interface {
|
|
||||||
Elapsed() <-chan time.Time
|
|
||||||
Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each
|
|
||||||
// tick is realigned to avoid drift and handle changes to the system clock.
|
|
||||||
//
|
|
||||||
// The ticks may have an jitter duration applied to them as an random offset to
|
|
||||||
// the interval. However the overall pace of is that of the interval, so on
|
|
||||||
// average you will have one collection each interval.
|
|
||||||
//
|
|
||||||
// The first tick is emitted at the next alignment.
|
|
||||||
//
|
|
||||||
// Ticks are dropped for slow consumers.
|
|
||||||
//
|
|
||||||
// The implementation currently does not recalculate until the next tick with
|
|
||||||
// no maximum sleep, when using large intervals alignment is not corrected
|
|
||||||
// until the next tick.
|
|
||||||
type AlignedTicker struct {
|
|
||||||
interval time.Duration
|
|
||||||
jitter time.Duration
|
|
||||||
ch chan time.Time
|
|
||||||
cancel context.CancelFunc
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker {
|
|
||||||
return newAlignedTicker(now, interval, jitter, clock.New())
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker {
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
t := &AlignedTicker{
|
|
||||||
interval: interval,
|
|
||||||
jitter: jitter,
|
|
||||||
ch: make(chan time.Time, 1),
|
|
||||||
cancel: cancel,
|
|
||||||
}
|
|
||||||
|
|
||||||
d := t.next(now)
|
|
||||||
timer := clock.Timer(d)
|
|
||||||
|
|
||||||
t.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer t.wg.Done()
|
|
||||||
t.run(ctx, timer)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *AlignedTicker) next(now time.Time) time.Duration {
|
|
||||||
next := internal.AlignTime(now, t.interval)
|
|
||||||
d := next.Sub(now)
|
|
||||||
if d == 0 {
|
|
||||||
d = t.interval
|
|
||||||
}
|
|
||||||
d += internal.RandomDuration(t.jitter)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
timer.Stop()
|
|
||||||
return
|
|
||||||
case now := <-timer.C:
|
|
||||||
select {
|
|
||||||
case t.ch <- now:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
d := t.next(now)
|
|
||||||
timer.Reset(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *AlignedTicker) Elapsed() <-chan time.Time {
|
|
||||||
return t.ch
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *AlignedTicker) Stop() {
|
|
||||||
t.cancel()
|
|
||||||
t.wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnalignedTicker delivers ticks at regular but unaligned intervals. No
|
|
||||||
// effort is made to avoid drift.
|
|
||||||
//
|
|
||||||
// The ticks may have an jitter duration applied to them as an random offset to
|
|
||||||
// the interval. However the overall pace of is that of the interval, so on
|
|
||||||
// average you will have one collection each interval.
|
|
||||||
//
|
|
||||||
// The first tick is emitted immediately.
|
|
||||||
//
|
|
||||||
// Ticks are dropped for slow consumers.
|
|
||||||
type UnalignedTicker struct {
|
|
||||||
interval time.Duration
|
|
||||||
jitter time.Duration
|
|
||||||
ch chan time.Time
|
|
||||||
cancel context.CancelFunc
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker {
|
|
||||||
return newUnalignedTicker(interval, jitter, clock.New())
|
|
||||||
}
|
|
||||||
|
|
||||||
func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker {
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
t := &UnalignedTicker{
|
|
||||||
interval: interval,
|
|
||||||
jitter: jitter,
|
|
||||||
ch: make(chan time.Time, 1),
|
|
||||||
cancel: cancel,
|
|
||||||
}
|
|
||||||
|
|
||||||
ticker := clock.Ticker(t.interval)
|
|
||||||
t.ch <- clock.Now()
|
|
||||||
|
|
||||||
t.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer t.wg.Done()
|
|
||||||
t.run(ctx, ticker, clock)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error {
|
|
||||||
if duration == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
t := clock.Timer(duration)
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
t.Stop()
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
ticker.Stop()
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
jitter := internal.RandomDuration(t.jitter)
|
|
||||||
err := sleep(ctx, jitter, clock)
|
|
||||||
if err != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case t.ch <- clock.Now():
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *UnalignedTicker) InjectTick() {
|
|
||||||
t.ch <- time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *UnalignedTicker) Elapsed() <-chan time.Time {
|
|
||||||
return t.ch
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *UnalignedTicker) Stop() {
|
|
||||||
t.cancel()
|
|
||||||
t.wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// RollingTicker delivers ticks at regular but unaligned intervals.
|
|
||||||
//
|
|
||||||
// Because the next interval is scheduled based on the interval + jitter, you
|
|
||||||
// are guaranteed at least interval seconds without missing a tick and ticks
|
|
||||||
// will be evenly scheduled over time.
|
|
||||||
//
|
|
||||||
// On average you will have one collection each interval + (jitter/2).
|
|
||||||
//
|
|
||||||
// The first tick is emitted after interval+jitter seconds.
|
|
||||||
//
|
|
||||||
// Ticks are dropped for slow consumers.
|
|
||||||
type RollingTicker struct {
|
|
||||||
interval time.Duration
|
|
||||||
jitter time.Duration
|
|
||||||
ch chan time.Time
|
|
||||||
cancel context.CancelFunc
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRollingTicker(interval, jitter time.Duration) *RollingTicker {
|
|
||||||
return newRollingTicker(interval, jitter, clock.New())
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker {
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
t := &RollingTicker{
|
|
||||||
interval: interval,
|
|
||||||
jitter: jitter,
|
|
||||||
ch: make(chan time.Time, 1),
|
|
||||||
cancel: cancel,
|
|
||||||
}
|
|
||||||
|
|
||||||
d := t.next()
|
|
||||||
timer := clock.Timer(d)
|
|
||||||
|
|
||||||
t.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer t.wg.Done()
|
|
||||||
t.run(ctx, timer)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *RollingTicker) next() time.Duration {
|
|
||||||
return t.interval + internal.RandomDuration(t.jitter)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
timer.Stop()
|
|
||||||
return
|
|
||||||
case now := <-timer.C:
|
|
||||||
select {
|
|
||||||
case t.ch <- now:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
d := t.next()
|
|
||||||
timer.Reset(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *RollingTicker) Elapsed() <-chan time.Time {
|
|
||||||
return t.ch
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *RollingTicker) Stop() {
|
|
||||||
t.cancel()
|
|
||||||
t.wg.Wait()
|
|
||||||
}
|
|
|
@ -1,251 +0,0 @@
|
||||||
package agent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/benbjohnson/clock"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
var format = "2006-01-02T15:04:05.999Z07:00"
|
|
||||||
|
|
||||||
func TestAlignedTicker(t *testing.T) {
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 0 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
since := clock.Now()
|
|
||||||
until := since.Add(60 * time.Second)
|
|
||||||
|
|
||||||
ticker := newAlignedTicker(since, interval, jitter, clock)
|
|
||||||
|
|
||||||
expected := []time.Time{
|
|
||||||
time.Unix(10, 0).UTC(),
|
|
||||||
time.Unix(20, 0).UTC(),
|
|
||||||
time.Unix(30, 0).UTC(),
|
|
||||||
time.Unix(40, 0).UTC(),
|
|
||||||
time.Unix(50, 0).UTC(),
|
|
||||||
time.Unix(60, 0).UTC(),
|
|
||||||
}
|
|
||||||
|
|
||||||
actual := []time.Time{}
|
|
||||||
for !clock.Now().After(until) {
|
|
||||||
select {
|
|
||||||
case tm := <-ticker.Elapsed():
|
|
||||||
actual = append(actual, tm.UTC())
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
clock.Add(10 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlignedTickerJitter(t *testing.T) {
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 5 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
since := clock.Now()
|
|
||||||
until := since.Add(60 * time.Second)
|
|
||||||
|
|
||||||
ticker := newAlignedTicker(since, interval, jitter, clock)
|
|
||||||
|
|
||||||
last := since
|
|
||||||
for !clock.Now().After(until) {
|
|
||||||
select {
|
|
||||||
case tm := <-ticker.Elapsed():
|
|
||||||
require.True(t, tm.Sub(last) <= 15*time.Second)
|
|
||||||
require.True(t, tm.Sub(last) >= 5*time.Second)
|
|
||||||
last = last.Add(interval)
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
clock.Add(5 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlignedTickerMissedTick(t *testing.T) {
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 0 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
since := clock.Now()
|
|
||||||
|
|
||||||
ticker := newAlignedTicker(since, interval, jitter, clock)
|
|
||||||
|
|
||||||
clock.Add(25 * time.Second)
|
|
||||||
tm := <-ticker.Elapsed()
|
|
||||||
require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC())
|
|
||||||
clock.Add(5 * time.Second)
|
|
||||||
tm = <-ticker.Elapsed()
|
|
||||||
require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUnalignedTicker(t *testing.T) {
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 0 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
clock.Add(1 * time.Second)
|
|
||||||
since := clock.Now()
|
|
||||||
until := since.Add(60 * time.Second)
|
|
||||||
|
|
||||||
ticker := newUnalignedTicker(interval, jitter, clock)
|
|
||||||
|
|
||||||
expected := []time.Time{
|
|
||||||
time.Unix(1, 0).UTC(),
|
|
||||||
time.Unix(11, 0).UTC(),
|
|
||||||
time.Unix(21, 0).UTC(),
|
|
||||||
time.Unix(31, 0).UTC(),
|
|
||||||
time.Unix(41, 0).UTC(),
|
|
||||||
time.Unix(51, 0).UTC(),
|
|
||||||
time.Unix(61, 0).UTC(),
|
|
||||||
}
|
|
||||||
|
|
||||||
actual := []time.Time{}
|
|
||||||
for !clock.Now().After(until) {
|
|
||||||
select {
|
|
||||||
case tm := <-ticker.Elapsed():
|
|
||||||
actual = append(actual, tm.UTC())
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
clock.Add(10 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRollingTicker(t *testing.T) {
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 0 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
clock.Add(1 * time.Second)
|
|
||||||
since := clock.Now()
|
|
||||||
until := since.Add(60 * time.Second)
|
|
||||||
|
|
||||||
ticker := newUnalignedTicker(interval, jitter, clock)
|
|
||||||
|
|
||||||
expected := []time.Time{
|
|
||||||
time.Unix(1, 0).UTC(),
|
|
||||||
time.Unix(11, 0).UTC(),
|
|
||||||
time.Unix(21, 0).UTC(),
|
|
||||||
time.Unix(31, 0).UTC(),
|
|
||||||
time.Unix(41, 0).UTC(),
|
|
||||||
time.Unix(51, 0).UTC(),
|
|
||||||
time.Unix(61, 0).UTC(),
|
|
||||||
}
|
|
||||||
|
|
||||||
actual := []time.Time{}
|
|
||||||
for !clock.Now().After(until) {
|
|
||||||
select {
|
|
||||||
case tm := <-ticker.Elapsed():
|
|
||||||
actual = append(actual, tm.UTC())
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
clock.Add(10 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simulates running the Ticker for an hour and displays stats about the
|
|
||||||
// operation.
|
|
||||||
func TestAlignedTickerDistribution(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping test in short mode.")
|
|
||||||
}
|
|
||||||
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 5 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
since := clock.Now()
|
|
||||||
|
|
||||||
ticker := newAlignedTicker(since, interval, jitter, clock)
|
|
||||||
dist := simulatedDist(ticker, clock)
|
|
||||||
printDist(dist)
|
|
||||||
require.True(t, 350 < dist.Count)
|
|
||||||
require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simulates running the Ticker for an hour and displays stats about the
|
|
||||||
// operation.
|
|
||||||
func TestUnalignedTickerDistribution(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping test in short mode.")
|
|
||||||
}
|
|
||||||
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 5 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
|
|
||||||
ticker := newUnalignedTicker(interval, jitter, clock)
|
|
||||||
dist := simulatedDist(ticker, clock)
|
|
||||||
printDist(dist)
|
|
||||||
require.True(t, 350 < dist.Count)
|
|
||||||
require.True(t, 9 < dist.Mean() && dist.Mean() < 11)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simulates running the Ticker for an hour and displays stats about the
|
|
||||||
// operation.
|
|
||||||
func TestRollingTickerDistribution(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping test in short mode.")
|
|
||||||
}
|
|
||||||
|
|
||||||
interval := 10 * time.Second
|
|
||||||
jitter := 5 * time.Second
|
|
||||||
|
|
||||||
clock := clock.NewMock()
|
|
||||||
|
|
||||||
ticker := newRollingTicker(interval, jitter, clock)
|
|
||||||
dist := simulatedDist(ticker, clock)
|
|
||||||
printDist(dist)
|
|
||||||
require.True(t, 275 < dist.Count)
|
|
||||||
require.True(t, 12 < dist.Mean() && 13 > dist.Mean())
|
|
||||||
}
|
|
||||||
|
|
||||||
type Distribution struct {
|
|
||||||
Buckets [60]int
|
|
||||||
Count int
|
|
||||||
Waittime float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Distribution) Mean() float64 {
|
|
||||||
return d.Waittime / float64(d.Count)
|
|
||||||
}
|
|
||||||
|
|
||||||
func printDist(dist Distribution) {
|
|
||||||
for i, count := range dist.Buckets {
|
|
||||||
fmt.Printf("%2d %s\n", i, strings.Repeat("x", count))
|
|
||||||
}
|
|
||||||
fmt.Printf("Average interval: %f\n", dist.Mean())
|
|
||||||
fmt.Printf("Count: %d\n", dist.Count)
|
|
||||||
}
|
|
||||||
|
|
||||||
func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution {
|
|
||||||
since := clock.Now()
|
|
||||||
until := since.Add(1 * time.Hour)
|
|
||||||
|
|
||||||
var dist Distribution
|
|
||||||
|
|
||||||
last := clock.Now()
|
|
||||||
for !clock.Now().After(until) {
|
|
||||||
select {
|
|
||||||
case tm := <-ticker.Elapsed():
|
|
||||||
dist.Buckets[tm.Second()] += 1
|
|
||||||
dist.Count++
|
|
||||||
dist.Waittime += tm.Sub(last).Seconds()
|
|
||||||
last = tm
|
|
||||||
default:
|
|
||||||
clock.Add(1 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return dist
|
|
||||||
}
|
|
|
@ -5,7 +5,11 @@ package telegraf
|
||||||
// Add, Push, and Reset can not be called concurrently, so locking is not
|
// Add, Push, and Reset can not be called concurrently, so locking is not
|
||||||
// required when implementing an Aggregator plugin.
|
// required when implementing an Aggregator plugin.
|
||||||
type Aggregator interface {
|
type Aggregator interface {
|
||||||
PluginDescriber
|
// SampleConfig returns the default configuration of the Input.
|
||||||
|
SampleConfig() string
|
||||||
|
|
||||||
|
// Description returns a one-sentence description on the Input.
|
||||||
|
Description() string
|
||||||
|
|
||||||
// Add the metric to the aggregator.
|
// Add the metric to the aggregator.
|
||||||
Add(in Metric)
|
Add(in Metric)
|
||||||
|
|
26
appveyor.yml
26
appveyor.yml
|
@ -1,35 +1,33 @@
|
||||||
version: "{build}"
|
version: "{build}"
|
||||||
|
|
||||||
image: Visual Studio 2019
|
|
||||||
|
|
||||||
cache:
|
cache:
|
||||||
- C:\gopath\pkg\mod -> go.sum
|
- C:\Cache
|
||||||
- C:\ProgramData\chocolatey\bin -> appveyor.yml
|
|
||||||
- C:\ProgramData\chocolatey\lib -> appveyor.yml
|
|
||||||
|
|
||||||
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
|
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
GOPATH: C:\gopath
|
GOPATH: C:\gopath
|
||||||
|
|
||||||
stack: go 1.14
|
|
||||||
|
|
||||||
platform: x64
|
platform: x64
|
||||||
|
|
||||||
install:
|
install:
|
||||||
- choco install make
|
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
|
||||||
- cd "%GOPATH%\src\github.com\influxdata\telegraf"
|
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
|
||||||
- git config --system core.longpaths true
|
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
|
||||||
|
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
|
||||||
|
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
|
||||||
|
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
|
||||||
|
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
|
||||||
|
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
|
||||||
- go version
|
- go version
|
||||||
- go env
|
- go env
|
||||||
|
|
||||||
build_script:
|
build_script:
|
||||||
- make deps
|
- cmd: C:\GnuWin32\bin\make deps
|
||||||
- make telegraf
|
- cmd: C:\GnuWin32\bin\make telegraf
|
||||||
|
|
||||||
test_script:
|
test_script:
|
||||||
- make check
|
- cmd: C:\GnuWin32\bin\make test-windows
|
||||||
- make test-windows
|
|
||||||
|
|
||||||
artifacts:
|
artifacts:
|
||||||
- path: telegraf.exe
|
- path: telegraf.exe
|
||||||
|
|
|
@ -1,8 +1,6 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
@ -10,15 +8,12 @@ import (
|
||||||
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
|
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"sort"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/agent"
|
"github.com/influxdata/telegraf/agent"
|
||||||
"github.com/influxdata/telegraf/config"
|
"github.com/influxdata/telegraf/internal/config"
|
||||||
"github.com/influxdata/telegraf/internal"
|
|
||||||
"github.com/influxdata/telegraf/internal/goplugin"
|
|
||||||
"github.com/influxdata/telegraf/logger"
|
"github.com/influxdata/telegraf/logger"
|
||||||
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
|
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
|
@ -26,26 +21,23 @@ import (
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
"github.com/influxdata/telegraf/plugins/outputs"
|
||||||
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
_ "github.com/influxdata/telegraf/plugins/outputs/all"
|
||||||
_ "github.com/influxdata/telegraf/plugins/processors/all"
|
_ "github.com/influxdata/telegraf/plugins/processors/all"
|
||||||
|
"github.com/kardianos/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
// If you update these, update usage.go and usage_windows.go
|
|
||||||
var fDebug = flag.Bool("debug", false,
|
var fDebug = flag.Bool("debug", false,
|
||||||
"turn on debug logging")
|
"turn on debug logging")
|
||||||
var pprofAddr = flag.String("pprof-addr", "",
|
var pprofAddr = flag.String("pprof-addr", "",
|
||||||
"pprof address to listen on, not activate pprof if empty")
|
"pprof address to listen on, not activate pprof if empty")
|
||||||
var fQuiet = flag.Bool("quiet", false,
|
var fQuiet = flag.Bool("quiet", false,
|
||||||
"run in quiet mode")
|
"run in quiet mode")
|
||||||
var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. Note: Test mode only runs inputs, not processors, aggregators, or outputs")
|
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
|
||||||
var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode")
|
|
||||||
var fConfig = flag.String("config", "", "configuration file to load")
|
var fConfig = flag.String("config", "", "configuration file to load")
|
||||||
var fConfigDirectory = flag.String("config-directory", "",
|
var fConfigDirectory = flag.String("config-directory", "",
|
||||||
"directory containing additional *.conf files")
|
"directory containing additional *.conf files")
|
||||||
var fVersion = flag.Bool("version", false, "display the version and exit")
|
var fVersion = flag.Bool("version", false, "display the version")
|
||||||
var fSampleConfig = flag.Bool("sample-config", false,
|
var fSampleConfig = flag.Bool("sample-config", false,
|
||||||
"print out full sample configuration")
|
"print out full sample configuration")
|
||||||
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
|
||||||
var fSectionFilters = flag.String("section-filter", "",
|
|
||||||
"filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'")
|
|
||||||
var fInputFilters = flag.String("input-filter", "",
|
var fInputFilters = flag.String("input-filter", "",
|
||||||
"filter the inputs to enable, separator is :")
|
"filter the inputs to enable, separator is :")
|
||||||
var fInputList = flag.Bool("input-list", false,
|
var fInputList = flag.Bool("input-list", false,
|
||||||
|
@ -61,23 +53,72 @@ var fProcessorFilters = flag.String("processor-filter", "",
|
||||||
var fUsage = flag.String("usage", "",
|
var fUsage = flag.String("usage", "",
|
||||||
"print usage for a plugin, ie, 'telegraf --usage mysql'")
|
"print usage for a plugin, ie, 'telegraf --usage mysql'")
|
||||||
var fService = flag.String("service", "",
|
var fService = flag.String("service", "",
|
||||||
"operate on the service (windows only)")
|
"operate on the service")
|
||||||
var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)")
|
|
||||||
var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)")
|
|
||||||
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
|
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
|
||||||
var fPlugins = flag.String("plugin-directory", "",
|
|
||||||
"path to directory containing external plugins")
|
|
||||||
var fRunOnce = flag.Bool("once", false, "run one gather and exit")
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
nextVersion = "1.6.0"
|
||||||
version string
|
version string
|
||||||
commit string
|
commit string
|
||||||
branch string
|
branch string
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// If commit or branch are not set, make that clear.
|
||||||
|
if commit == "" {
|
||||||
|
commit = "unknown"
|
||||||
|
}
|
||||||
|
if branch == "" {
|
||||||
|
branch = "unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
telegraf [commands|flags]
|
||||||
|
|
||||||
|
The commands & flags are:
|
||||||
|
|
||||||
|
config print out full sample configuration to stdout
|
||||||
|
version print the version to stdout
|
||||||
|
|
||||||
|
--config <file> configuration file to load
|
||||||
|
--test gather metrics once, print them to stdout, and exit
|
||||||
|
--config-directory directory containing additional *.conf files
|
||||||
|
--input-filter filter the input plugins to enable, separator is :
|
||||||
|
--output-filter filter the output plugins to enable, separator is :
|
||||||
|
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
|
||||||
|
--debug print metrics as they're generated to stdout
|
||||||
|
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
|
||||||
|
--quiet run in quiet mode
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
# generate a telegraf config file:
|
||||||
|
telegraf config > telegraf.conf
|
||||||
|
|
||||||
|
# generate config with only cpu input & influxdb output plugins defined
|
||||||
|
telegraf --input-filter cpu --output-filter influxdb config
|
||||||
|
|
||||||
|
# run a single telegraf collection, outputing metrics to stdout
|
||||||
|
telegraf --config telegraf.conf --test
|
||||||
|
|
||||||
|
# run telegraf with all plugins defined in config file
|
||||||
|
telegraf --config telegraf.conf
|
||||||
|
|
||||||
|
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
|
||||||
|
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
|
||||||
|
|
||||||
|
# run telegraf with pprof
|
||||||
|
telegraf --config telegraf.conf --pprof-addr localhost:6060
|
||||||
|
`
|
||||||
|
|
||||||
var stop chan struct{}
|
var stop chan struct{}
|
||||||
|
|
||||||
func reloadLoop(
|
func reloadLoop(
|
||||||
|
stop chan struct{},
|
||||||
inputFilters []string,
|
inputFilters []string,
|
||||||
outputFilters []string,
|
outputFilters []string,
|
||||||
aggregatorFilters []string,
|
aggregatorFilters []string,
|
||||||
|
@ -88,102 +129,86 @@ func reloadLoop(
|
||||||
for <-reload {
|
for <-reload {
|
||||||
reload <- false
|
reload <- false
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
signals := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(signals, os.Interrupt, syscall.SIGHUP,
|
|
||||||
syscall.SIGTERM, syscall.SIGINT)
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case sig := <-signals:
|
|
||||||
if sig == syscall.SIGHUP {
|
|
||||||
log.Printf("I! Reloading Telegraf config")
|
|
||||||
<-reload
|
|
||||||
reload <- true
|
|
||||||
}
|
|
||||||
cancel()
|
|
||||||
case <-stop:
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
err := runAgent(ctx, inputFilters, outputFilters)
|
|
||||||
if err != nil && err != context.Canceled {
|
|
||||||
log.Fatalf("E! [telegraf] Error running agent: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func runAgent(ctx context.Context,
|
|
||||||
inputFilters []string,
|
|
||||||
outputFilters []string,
|
|
||||||
) error {
|
|
||||||
log.Printf("I! Starting Telegraf %s", version)
|
|
||||||
|
|
||||||
// If no other options are specified, load the config file and run.
|
// If no other options are specified, load the config file and run.
|
||||||
c := config.NewConfig()
|
c := config.NewConfig()
|
||||||
c.OutputFilters = outputFilters
|
c.OutputFilters = outputFilters
|
||||||
c.InputFilters = inputFilters
|
c.InputFilters = inputFilters
|
||||||
err := c.LoadConfig(*fConfig)
|
err := c.LoadConfig(*fConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatal("E! " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
if *fConfigDirectory != "" {
|
if *fConfigDirectory != "" {
|
||||||
err = c.LoadDirectory(*fConfigDirectory)
|
err = c.LoadDirectory(*fConfigDirectory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatal("E! " + err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !*fTest && len(c.Outputs) == 0 {
|
if !*fTest && len(c.Outputs) == 0 {
|
||||||
return errors.New("Error: no outputs found, did you provide a valid config file?")
|
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
|
||||||
}
|
}
|
||||||
if *fPlugins == "" && len(c.Inputs) == 0 {
|
if len(c.Inputs) == 0 {
|
||||||
return errors.New("Error: no inputs found, did you provide a valid config file?")
|
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
|
||||||
}
|
}
|
||||||
|
|
||||||
if int64(c.Agent.Interval.Duration) <= 0 {
|
if int64(c.Agent.Interval.Duration) <= 0 {
|
||||||
return fmt.Errorf("Agent interval must be positive, found %s",
|
log.Fatalf("E! Agent interval must be positive, found %s",
|
||||||
c.Agent.Interval.Duration)
|
c.Agent.Interval.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
if int64(c.Agent.FlushInterval.Duration) <= 0 {
|
if int64(c.Agent.FlushInterval.Duration) <= 0 {
|
||||||
return fmt.Errorf("Agent flush_interval must be positive; found %s",
|
log.Fatalf("E! Agent flush_interval must be positive; found %s",
|
||||||
c.Agent.Interval.Duration)
|
c.Agent.Interval.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
ag, err := agent.NewAgent(c)
|
ag, err := agent.NewAgent(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatal("E! " + err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Setup logging as configured.
|
// Setup logging
|
||||||
logConfig := logger.LogConfig{
|
logger.SetupLogging(
|
||||||
Debug: ag.Config.Agent.Debug || *fDebug,
|
ag.Config.Agent.Debug || *fDebug,
|
||||||
Quiet: ag.Config.Agent.Quiet || *fQuiet,
|
ag.Config.Agent.Quiet || *fQuiet,
|
||||||
LogTarget: ag.Config.Agent.LogTarget,
|
ag.Config.Agent.Logfile,
|
||||||
Logfile: ag.Config.Agent.Logfile,
|
)
|
||||||
RotationInterval: ag.Config.Agent.LogfileRotationInterval,
|
|
||||||
RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize,
|
if *fTest {
|
||||||
RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives,
|
err = ag.Test()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("E! " + err.Error())
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.SetupLogging(logConfig)
|
err = ag.Connect()
|
||||||
|
if err != nil {
|
||||||
if *fRunOnce {
|
log.Fatal("E! " + err.Error())
|
||||||
wait := time.Duration(*fTestWait) * time.Second
|
|
||||||
return ag.Once(ctx, wait)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if *fTest || *fTestWait != 0 {
|
shutdown := make(chan struct{})
|
||||||
wait := time.Duration(*fTestWait) * time.Second
|
signals := make(chan os.Signal)
|
||||||
return ag.Test(ctx, wait)
|
signal.Notify(signals, os.Interrupt, syscall.SIGHUP)
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case sig := <-signals:
|
||||||
|
if sig == os.Interrupt {
|
||||||
|
close(shutdown)
|
||||||
}
|
}
|
||||||
|
if sig == syscall.SIGHUP {
|
||||||
|
log.Printf("I! Reloading Telegraf config\n")
|
||||||
|
<-reload
|
||||||
|
reload <- true
|
||||||
|
close(shutdown)
|
||||||
|
}
|
||||||
|
case <-stop:
|
||||||
|
close(shutdown)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
log.Printf("I! Starting Telegraf %s\n", displayVersion())
|
||||||
log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " "))
|
|
||||||
log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " "))
|
|
||||||
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
|
||||||
|
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
|
||||||
log.Printf("I! Tags enabled: %s", c.ListTags())
|
log.Printf("I! Tags enabled: %s", c.ListTags())
|
||||||
|
|
||||||
if *fPidfile != "" {
|
if *fPidfile != "" {
|
||||||
|
@ -204,35 +229,46 @@ func runAgent(ctx context.Context,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return ag.Run(ctx)
|
ag.Run(shutdown)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func usageExit(rc int) {
|
func usageExit(rc int) {
|
||||||
fmt.Println(internal.Usage)
|
fmt.Println(usage)
|
||||||
os.Exit(rc)
|
os.Exit(rc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func formatFullVersion() string {
|
type program struct {
|
||||||
var parts = []string{"Telegraf"}
|
inputFilters []string
|
||||||
|
outputFilters []string
|
||||||
|
aggregatorFilters []string
|
||||||
|
processorFilters []string
|
||||||
|
}
|
||||||
|
|
||||||
if version != "" {
|
func (p *program) Start(s service.Service) error {
|
||||||
parts = append(parts, version)
|
go p.run()
|
||||||
} else {
|
return nil
|
||||||
parts = append(parts, "unknown")
|
}
|
||||||
}
|
func (p *program) run() {
|
||||||
|
stop = make(chan struct{})
|
||||||
|
reloadLoop(
|
||||||
|
stop,
|
||||||
|
p.inputFilters,
|
||||||
|
p.outputFilters,
|
||||||
|
p.aggregatorFilters,
|
||||||
|
p.processorFilters,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func (p *program) Stop(s service.Service) error {
|
||||||
|
close(stop)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if branch != "" || commit != "" {
|
func displayVersion() string {
|
||||||
if branch == "" {
|
if version == "" {
|
||||||
branch = "unknown"
|
return fmt.Sprintf("v%s~%s", nextVersion, commit)
|
||||||
}
|
}
|
||||||
if commit == "" {
|
return "v" + version
|
||||||
commit = "unknown"
|
|
||||||
}
|
|
||||||
git := fmt.Sprintf("(git: %s %s)", branch, commit)
|
|
||||||
parts = append(parts, git)
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Join(parts, " ")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
@ -240,10 +276,7 @@ func main() {
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
args := flag.Args()
|
args := flag.Args()
|
||||||
|
|
||||||
sectionFilters, inputFilters, outputFilters := []string{}, []string{}, []string{}
|
inputFilters, outputFilters := []string{}, []string{}
|
||||||
if *fSectionFilters != "" {
|
|
||||||
sectionFilters = strings.Split(":"+strings.TrimSpace(*fSectionFilters)+":", ":")
|
|
||||||
}
|
|
||||||
if *fInputFilters != "" {
|
if *fInputFilters != "" {
|
||||||
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
|
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
|
||||||
}
|
}
|
||||||
|
@ -259,16 +292,6 @@ func main() {
|
||||||
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
|
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.SetupLogging(logger.LogConfig{})
|
|
||||||
|
|
||||||
// Load external plugins, if requested.
|
|
||||||
if *fPlugins != "" {
|
|
||||||
log.Printf("I! Loading external plugins from: %s", *fPlugins)
|
|
||||||
if err := goplugin.LoadExternalPlugins(*fPlugins); err != nil {
|
|
||||||
log.Fatal("E! " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if *pprofAddr != "" {
|
if *pprofAddr != "" {
|
||||||
go func() {
|
go func() {
|
||||||
pprofHostPort := *pprofAddr
|
pprofHostPort := *pprofAddr
|
||||||
|
@ -289,11 +312,10 @@ func main() {
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
switch args[0] {
|
switch args[0] {
|
||||||
case "version":
|
case "version":
|
||||||
fmt.Println(formatFullVersion())
|
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
|
||||||
return
|
return
|
||||||
case "config":
|
case "config":
|
||||||
config.PrintSampleConfig(
|
config.PrintSampleConfig(
|
||||||
sectionFilters,
|
|
||||||
inputFilters,
|
inputFilters,
|
||||||
outputFilters,
|
outputFilters,
|
||||||
aggregatorFilters,
|
aggregatorFilters,
|
||||||
|
@ -306,33 +328,22 @@ func main() {
|
||||||
// switch for flags which just do something and exit immediately
|
// switch for flags which just do something and exit immediately
|
||||||
switch {
|
switch {
|
||||||
case *fOutputList:
|
case *fOutputList:
|
||||||
fmt.Println("Available Output Plugins: ")
|
fmt.Println("Available Output Plugins:")
|
||||||
names := make([]string, 0, len(outputs.Outputs))
|
for k, _ := range outputs.Outputs {
|
||||||
for k := range outputs.Outputs {
|
|
||||||
names = append(names, k)
|
|
||||||
}
|
|
||||||
sort.Strings(names)
|
|
||||||
for _, k := range names {
|
|
||||||
fmt.Printf(" %s\n", k)
|
fmt.Printf(" %s\n", k)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case *fInputList:
|
case *fInputList:
|
||||||
fmt.Println("Available Input Plugins:")
|
fmt.Println("Available Input Plugins:")
|
||||||
names := make([]string, 0, len(inputs.Inputs))
|
for k, _ := range inputs.Inputs {
|
||||||
for k := range inputs.Inputs {
|
|
||||||
names = append(names, k)
|
|
||||||
}
|
|
||||||
sort.Strings(names)
|
|
||||||
for _, k := range names {
|
|
||||||
fmt.Printf(" %s\n", k)
|
fmt.Printf(" %s\n", k)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case *fVersion:
|
case *fVersion:
|
||||||
fmt.Println(formatFullVersion())
|
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
|
||||||
return
|
return
|
||||||
case *fSampleConfig:
|
case *fSampleConfig:
|
||||||
config.PrintSampleConfig(
|
config.PrintSampleConfig(
|
||||||
sectionFilters,
|
|
||||||
inputFilters,
|
inputFilters,
|
||||||
outputFilters,
|
outputFilters,
|
||||||
aggregatorFilters,
|
aggregatorFilters,
|
||||||
|
@ -348,20 +359,53 @@ func main() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
shortVersion := version
|
if runtime.GOOS == "windows" && !(*fRunAsConsole) {
|
||||||
if shortVersion == "" {
|
svcConfig := &service.Config{
|
||||||
shortVersion = "unknown"
|
Name: "telegraf",
|
||||||
|
DisplayName: "Telegraf Data Collector Service",
|
||||||
|
Description: "Collects data using a series of plugins and publishes it to" +
|
||||||
|
"another series of plugins.",
|
||||||
|
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure version
|
prg := &program{
|
||||||
if err := internal.SetVersion(shortVersion); err != nil {
|
inputFilters: inputFilters,
|
||||||
log.Println("Telegraf version already configured to: " + internal.Version())
|
outputFilters: outputFilters,
|
||||||
|
aggregatorFilters: aggregatorFilters,
|
||||||
|
processorFilters: processorFilters,
|
||||||
}
|
}
|
||||||
|
s, err := service.New(prg, svcConfig)
|
||||||
run(
|
if err != nil {
|
||||||
|
log.Fatal("E! " + err.Error())
|
||||||
|
}
|
||||||
|
// Handle the -service flag here to prevent any issues with tooling that
|
||||||
|
// may not have an interactive session, e.g. installing from Ansible.
|
||||||
|
if *fService != "" {
|
||||||
|
if *fConfig != "" {
|
||||||
|
(*svcConfig).Arguments = []string{"-config", *fConfig}
|
||||||
|
}
|
||||||
|
if *fConfigDirectory != "" {
|
||||||
|
(*svcConfig).Arguments = append((*svcConfig).Arguments, "-config-directory", *fConfigDirectory)
|
||||||
|
}
|
||||||
|
err := service.Control(s, *fService)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("E! " + err.Error())
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
|
} else {
|
||||||
|
err = s.Run()
|
||||||
|
if err != nil {
|
||||||
|
log.Println("E! " + err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
stop = make(chan struct{})
|
||||||
|
reloadLoop(
|
||||||
|
stop,
|
||||||
inputFilters,
|
inputFilters,
|
||||||
outputFilters,
|
outputFilters,
|
||||||
aggregatorFilters,
|
aggregatorFilters,
|
||||||
processorFilters,
|
processorFilters,
|
||||||
)
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
|
|
||||||
stop = make(chan struct{})
|
|
||||||
reloadLoop(
|
|
||||||
inputFilters,
|
|
||||||
outputFilters,
|
|
||||||
aggregatorFilters,
|
|
||||||
processorFilters,
|
|
||||||
)
|
|
||||||
}
|
|
|
@ -1,124 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/logger"
|
|
||||||
"github.com/kardianos/service"
|
|
||||||
)
|
|
||||||
|
|
||||||
func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
|
|
||||||
if runtime.GOOS == "windows" && windowsRunAsService() {
|
|
||||||
runAsWindowsService(
|
|
||||||
inputFilters,
|
|
||||||
outputFilters,
|
|
||||||
aggregatorFilters,
|
|
||||||
processorFilters,
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
stop = make(chan struct{})
|
|
||||||
reloadLoop(
|
|
||||||
inputFilters,
|
|
||||||
outputFilters,
|
|
||||||
aggregatorFilters,
|
|
||||||
processorFilters,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type program struct {
|
|
||||||
inputFilters []string
|
|
||||||
outputFilters []string
|
|
||||||
aggregatorFilters []string
|
|
||||||
processorFilters []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *program) Start(s service.Service) error {
|
|
||||||
go p.run()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (p *program) run() {
|
|
||||||
stop = make(chan struct{})
|
|
||||||
reloadLoop(
|
|
||||||
p.inputFilters,
|
|
||||||
p.outputFilters,
|
|
||||||
p.aggregatorFilters,
|
|
||||||
p.processorFilters,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
func (p *program) Stop(s service.Service) error {
|
|
||||||
close(stop)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) {
|
|
||||||
programFiles := os.Getenv("ProgramFiles")
|
|
||||||
if programFiles == "" { // Should never happen
|
|
||||||
programFiles = "C:\\Program Files"
|
|
||||||
}
|
|
||||||
svcConfig := &service.Config{
|
|
||||||
Name: *fServiceName,
|
|
||||||
DisplayName: *fServiceDisplayName,
|
|
||||||
Description: "Collects data using a series of plugins and publishes it to " +
|
|
||||||
"another series of plugins.",
|
|
||||||
Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"},
|
|
||||||
}
|
|
||||||
|
|
||||||
prg := &program{
|
|
||||||
inputFilters: inputFilters,
|
|
||||||
outputFilters: outputFilters,
|
|
||||||
aggregatorFilters: aggregatorFilters,
|
|
||||||
processorFilters: processorFilters,
|
|
||||||
}
|
|
||||||
s, err := service.New(prg, svcConfig)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("E! " + err.Error())
|
|
||||||
}
|
|
||||||
// Handle the --service flag here to prevent any issues with tooling that
|
|
||||||
// may not have an interactive session, e.g. installing from Ansible.
|
|
||||||
if *fService != "" {
|
|
||||||
if *fConfig != "" {
|
|
||||||
svcConfig.Arguments = []string{"--config", *fConfig}
|
|
||||||
}
|
|
||||||
if *fConfigDirectory != "" {
|
|
||||||
svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory)
|
|
||||||
}
|
|
||||||
//set servicename to service cmd line, to have a custom name after relaunch as a service
|
|
||||||
svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName)
|
|
||||||
|
|
||||||
err := service.Control(s, *fService)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("E! " + err.Error())
|
|
||||||
}
|
|
||||||
os.Exit(0)
|
|
||||||
} else {
|
|
||||||
winlogger, err := s.Logger(nil)
|
|
||||||
if err == nil {
|
|
||||||
//When in service mode, register eventlog target andd setup default logging to eventlog
|
|
||||||
logger.RegisterEventLogger(winlogger)
|
|
||||||
logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog})
|
|
||||||
}
|
|
||||||
err = s.Run()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Println("E! " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return true if Telegraf should create a Windows service.
|
|
||||||
func windowsRunAsService() bool {
|
|
||||||
if *fService != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if *fRunAsConsole {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return !service.Interactive()
|
|
||||||
}
|
|
|
@ -1,7 +0,0 @@
|
||||||
[[outputs.http]]
|
|
||||||
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
|
|
||||||
taginclude = ["org_id"]
|
|
||||||
|
|
||||||
[[outputs.http]]
|
|
||||||
headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" }
|
|
||||||
taginclude = ["org_id"]
|
|
|
@ -1,2 +0,0 @@
|
||||||
[[inputs.http_listener_v2]]
|
|
||||||
not_a_field = true
|
|
|
@ -1,4 +0,0 @@
|
||||||
[[outputs.http]]
|
|
||||||
[outputs.http.headers]
|
|
||||||
Content-Type = "application/json"
|
|
||||||
taginclude = ["org_id"]
|
|
|
@ -1,5 +0,0 @@
|
||||||
[[outputs.http]]
|
|
||||||
scopes = [
|
|
||||||
# comment
|
|
||||||
"test" # comment
|
|
||||||
]
|
|
|
@ -1,9 +0,0 @@
|
||||||
[[inputs.http_listener_v2]]
|
|
||||||
write_timeout = "1s"
|
|
||||||
max_body_size = "1MiB"
|
|
||||||
tls_cert = """
|
|
||||||
/path/to/my/cert
|
|
||||||
"""
|
|
||||||
tls_key = '''
|
|
||||||
/path/to/my/key
|
|
||||||
'''
|
|
|
@ -1,2 +0,0 @@
|
||||||
[[inputs.http_listener_v2]]
|
|
||||||
port = "80"
|
|
|
@ -1,2 +0,0 @@
|
||||||
[[inputs.http_listener_v2]]
|
|
||||||
methods = "POST"
|
|
|
@ -1,88 +0,0 @@
|
||||||
package config
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alecthomas/units"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Duration is a time.Duration
|
|
||||||
type Duration time.Duration
|
|
||||||
|
|
||||||
// Size is an int64
|
|
||||||
type Size int64
|
|
||||||
|
|
||||||
// Number is a float
|
|
||||||
type Number float64
|
|
||||||
|
|
||||||
// UnmarshalTOML parses the duration from the TOML config file
|
|
||||||
func (d Duration) UnmarshalTOML(b []byte) error {
|
|
||||||
var err error
|
|
||||||
b = bytes.Trim(b, `'`)
|
|
||||||
|
|
||||||
// see if we can directly convert it
|
|
||||||
dur, err := time.ParseDuration(string(b))
|
|
||||||
if err == nil {
|
|
||||||
d = Duration(dur)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse string duration, ie, "1s"
|
|
||||||
if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {
|
|
||||||
dur, err := time.ParseDuration(uq)
|
|
||||||
if err == nil {
|
|
||||||
d = Duration(dur)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// First try parsing as integer seconds
|
|
||||||
sI, err := strconv.ParseInt(string(b), 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
dur := time.Second * time.Duration(sI)
|
|
||||||
d = Duration(dur)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Second try parsing as float seconds
|
|
||||||
sF, err := strconv.ParseFloat(string(b), 64)
|
|
||||||
if err == nil {
|
|
||||||
dur := time.Second * time.Duration(sF)
|
|
||||||
d = Duration(dur)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s Size) UnmarshalTOML(b []byte) error {
|
|
||||||
var err error
|
|
||||||
b = bytes.Trim(b, `'`)
|
|
||||||
|
|
||||||
val, err := strconv.ParseInt(string(b), 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
s = Size(val)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
uq, err := strconv.Unquote(string(b))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
val, err = units.ParseStrictBytes(uq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s = Size(val)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n Number) UnmarshalTOML(b []byte) error {
|
|
||||||
value, err := strconv.ParseFloat(string(b), 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
n = Number(value)
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -17,20 +17,19 @@ services:
|
||||||
- KAFKA_ADVERTISED_HOST_NAME=localhost
|
- KAFKA_ADVERTISED_HOST_NAME=localhost
|
||||||
- KAFKA_ADVERTISED_PORT=9092
|
- KAFKA_ADVERTISED_PORT=9092
|
||||||
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
|
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
|
||||||
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
|
- KAFKA_CREATE_TOPICS="test:1:1"
|
||||||
- JAVA_OPTS="-Xms256m -Xmx256m"
|
- JAVA_OPTS="-Xms256m -Xmx256m"
|
||||||
ports:
|
ports:
|
||||||
- "9092:9092"
|
- "9092:9092"
|
||||||
depends_on:
|
depends_on:
|
||||||
- zookeeper
|
- zookeeper
|
||||||
elasticsearch:
|
elasticsearch:
|
||||||
image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
|
image: elasticsearch:5
|
||||||
environment:
|
environment:
|
||||||
- "ES_JAVA_OPTS=-Xms256m -Xmx256m"
|
- JAVA_OPTS="-Xms256m -Xmx256m"
|
||||||
- discovery.type=single-node
|
|
||||||
- xpack.security.enabled=false
|
|
||||||
ports:
|
ports:
|
||||||
- "9200:9200"
|
- "9200:9200"
|
||||||
|
- "9300:9300"
|
||||||
mysql:
|
mysql:
|
||||||
image: mysql
|
image: mysql
|
||||||
environment:
|
environment:
|
||||||
|
@ -41,17 +40,8 @@ services:
|
||||||
image: memcached
|
image: memcached
|
||||||
ports:
|
ports:
|
||||||
- "11211:11211"
|
- "11211:11211"
|
||||||
pgbouncer:
|
|
||||||
image: mbentley/ubuntu-pgbouncer
|
|
||||||
environment:
|
|
||||||
- PG_ENV_POSTGRESQL_USER=pgbouncer
|
|
||||||
- PG_ENV_POSTGRESQL_PASS=pgbouncer
|
|
||||||
ports:
|
|
||||||
- "6432:6432"
|
|
||||||
postgres:
|
postgres:
|
||||||
image: postgres:alpine
|
image: postgres:alpine
|
||||||
environment:
|
|
||||||
- POSTGRES_HOST_AUTH_METHOD=trust
|
|
||||||
ports:
|
ports:
|
||||||
- "5432:5432"
|
- "5432:5432"
|
||||||
rabbitmq:
|
rabbitmq:
|
||||||
|
@ -93,10 +83,11 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- "4200:4200"
|
- "4200:4200"
|
||||||
- "4230:4230"
|
- "4230:4230"
|
||||||
- "6543:5432"
|
|
||||||
command:
|
command:
|
||||||
- crate
|
- crate
|
||||||
- -Cnetwork.host=0.0.0.0
|
- -Cnetwork.host=0.0.0.0
|
||||||
- -Ctransport.host=localhost
|
- -Ctransport.host=localhost
|
||||||
|
- -Clicense.enterprise=false
|
||||||
environment:
|
environment:
|
||||||
- CRATE_HEAP_SIZE=128m
|
- CRATE_HEAP_SIZE=128m
|
||||||
|
- JAVA_OPTS='-Xms256m -Xmx256m'
|
||||||
|
|
|
@ -1,132 +0,0 @@
|
||||||
### Aggregator Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create a new aggregator plugin.
|
|
||||||
|
|
||||||
### Aggregator Plugin Guidelines
|
|
||||||
|
|
||||||
* A aggregator must conform to the [telegraf.Aggregator][] interface.
|
|
||||||
* Aggregators should call `aggregators.Add` in their `init` function to
|
|
||||||
register themselves. See below for a quick example.
|
|
||||||
* To be available within Telegraf itself, plugins must add themselves to the
|
|
||||||
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
|
|
||||||
- The `SampleConfig` function should return valid toml that describes how the
|
|
||||||
plugin can be configured. This is included in `telegraf config`. Please
|
|
||||||
consult the [SampleConfig][] page for the latest style guidelines.
|
|
||||||
* The `Description` function should say in one line what this aggregator does.
|
|
||||||
* The Aggregator plugin will need to keep caches of metrics that have passed
|
|
||||||
through it. This should be done using the builtin `HashID()` function of
|
|
||||||
each metric.
|
|
||||||
* When the `Reset()` function is called, all caches should be cleared.
|
|
||||||
- Follow the recommended [CodeStyle][].
|
|
||||||
|
|
||||||
### Aggregator Plugin Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package min
|
|
||||||
|
|
||||||
// min.go
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/aggregators"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Min struct {
|
|
||||||
// caches for metric fields, names, and tags
|
|
||||||
fieldCache map[uint64]map[string]float64
|
|
||||||
nameCache map[uint64]string
|
|
||||||
tagCache map[uint64]map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewMin() telegraf.Aggregator {
|
|
||||||
m := &Min{}
|
|
||||||
m.Reset()
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
var sampleConfig = `
|
|
||||||
## period is the flush & clear interval of the aggregator.
|
|
||||||
period = "30s"
|
|
||||||
## If true drop_original will drop the original metrics and
|
|
||||||
## only send aggregates.
|
|
||||||
drop_original = false
|
|
||||||
`
|
|
||||||
|
|
||||||
func (m *Min) Init() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Min) SampleConfig() string {
|
|
||||||
return sampleConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Min) Description() string {
|
|
||||||
return "Keep the aggregate min of each metric passing through."
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Min) Add(in telegraf.Metric) {
|
|
||||||
id := in.HashID()
|
|
||||||
if _, ok := m.nameCache[id]; !ok {
|
|
||||||
// hit an uncached metric, create caches for first time:
|
|
||||||
m.nameCache[id] = in.Name()
|
|
||||||
m.tagCache[id] = in.Tags()
|
|
||||||
m.fieldCache[id] = make(map[string]float64)
|
|
||||||
for k, v := range in.Fields() {
|
|
||||||
if fv, ok := convert(v); ok {
|
|
||||||
m.fieldCache[id][k] = fv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for k, v := range in.Fields() {
|
|
||||||
if fv, ok := convert(v); ok {
|
|
||||||
if _, ok := m.fieldCache[id][k]; !ok {
|
|
||||||
// hit an uncached field of a cached metric
|
|
||||||
m.fieldCache[id][k] = fv
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if fv < m.fieldCache[id][k] {
|
|
||||||
// set new minimum
|
|
||||||
m.fieldCache[id][k] = fv
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Min) Push(acc telegraf.Accumulator) {
|
|
||||||
for id, _ := range m.nameCache {
|
|
||||||
fields := map[string]interface{}{}
|
|
||||||
for k, v := range m.fieldCache[id] {
|
|
||||||
fields[k+"_min"] = v
|
|
||||||
}
|
|
||||||
acc.AddFields(m.nameCache[id], fields, m.tagCache[id])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Min) Reset() {
|
|
||||||
m.fieldCache = make(map[uint64]map[string]float64)
|
|
||||||
m.nameCache = make(map[uint64]string)
|
|
||||||
m.tagCache = make(map[uint64]map[string]string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func convert(in interface{}) (float64, bool) {
|
|
||||||
switch v := in.(type) {
|
|
||||||
case float64:
|
|
||||||
return v, true
|
|
||||||
case int64:
|
|
||||||
return float64(v), true
|
|
||||||
default:
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
aggregators.Add("min", func() telegraf.Aggregator {
|
|
||||||
return NewMin()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
[telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator
|
|
||||||
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
|
|
||||||
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
|
|
|
@ -44,15 +44,13 @@ to control which metrics are passed through a processor or aggregator. If a
|
||||||
metric is filtered out the metric bypasses the plugin and is passed downstream
|
metric is filtered out the metric bypasses the plugin and is passed downstream
|
||||||
to the next plugin.
|
to the next plugin.
|
||||||
|
|
||||||
### Processor
|
**Processor** plugins process metrics as they pass through and immediately emit
|
||||||
Processor plugins process metrics as they pass through and immediately emit
|
|
||||||
results based on the values they process. For example, this could be printing
|
results based on the values they process. For example, this could be printing
|
||||||
all metrics or adding a tag to all metrics that pass through.
|
all metrics or adding a tag to all metrics that pass through.
|
||||||
|
|
||||||
### Aggregator
|
**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators
|
||||||
Aggregator plugins, on the other hand, are a bit more complicated. Aggregators
|
|
||||||
are typically for emitting new _aggregate_ metrics, such as a running mean,
|
are typically for emitting new _aggregate_ metrics, such as a running mean,
|
||||||
minimum, maximum, or standard deviation. For this reason, all _aggregator_
|
minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
|
||||||
plugins are configured with a `period`. The `period` is the size of the window
|
plugins are configured with a `period`. The `period` is the size of the window
|
||||||
of metrics that each _aggregate_ represents. In other words, the emitted
|
of metrics that each _aggregate_ represents. In other words, the emitted
|
||||||
_aggregate_ metric will be the aggregated value of the past `period` seconds.
|
_aggregate_ metric will be the aggregated value of the past `period` seconds.
|
||||||
|
@ -60,8 +58,7 @@ Since many users will only care about their aggregates and not every single metr
|
||||||
gathered, there is also a `drop_original` argument, which tells Telegraf to only
|
gathered, there is also a `drop_original` argument, which tells Telegraf to only
|
||||||
emit the aggregates and not the original metrics.
|
emit the aggregates and not the original metrics.
|
||||||
|
|
||||||
Since aggregates are created for each measurement, field, and unique tag combination
|
**NOTE** That since aggregators only aggregate metrics within their period, that
|
||||||
the plugin receives, you can make use of `taginclude` to group
|
historical data is not supported. In other words, if your metric timestamp is more
|
||||||
aggregates by specific tags only.
|
than `now() - period` in the past, it will not be aggregated. If this is a feature
|
||||||
|
that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992)
|
||||||
**Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included.
|
|
||||||
|
|
|
@ -1,25 +1,33 @@
|
||||||
# Configuration
|
# Telegraf Configuration
|
||||||
|
|
||||||
Telegraf's configuration file is written using [TOML][] and is composed of
|
You can see the latest config file with all available plugins here:
|
||||||
three sections: [global tags][], [agent][] settings, and [plugins][].
|
[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf)
|
||||||
|
|
||||||
View the default [telegraf.conf][] config file with all available plugins.
|
## Generating a Configuration File
|
||||||
|
|
||||||
### Generating a Configuration File
|
A default Telegraf config file can be auto-generated by telegraf:
|
||||||
|
|
||||||
A default config file can be generated by telegraf:
|
```
|
||||||
```sh
|
|
||||||
telegraf config > telegraf.conf
|
telegraf config > telegraf.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
To generate a file with specific inputs and outputs, you can use the
|
To generate a file with specific inputs and outputs, you can use the
|
||||||
--input-filter and --output-filter flags:
|
--input-filter and --output-filter flags:
|
||||||
|
|
||||||
```sh
|
```
|
||||||
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
|
telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
|
||||||
```
|
```
|
||||||
|
|
||||||
### Configuration Loading
|
## Environment Variables
|
||||||
|
|
||||||
|
Environment variables can be used anywhere in the config file, simply prepend
|
||||||
|
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
|
||||||
|
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
|
||||||
|
|
||||||
|
When using the `.deb` or `.rpm` packages, you can define environment variables
|
||||||
|
in the `/etc/default/telegraf` file.
|
||||||
|
|
||||||
|
## Configuration file locations
|
||||||
|
|
||||||
The location of the configuration file can be set via the `--config` command
|
The location of the configuration file can be set via the `--config` command
|
||||||
line flag.
|
line flag.
|
||||||
|
@ -32,431 +40,180 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
|
||||||
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
|
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
|
||||||
configuration files.
|
configuration files.
|
||||||
|
|
||||||
### Environment Variables
|
# Global Tags
|
||||||
|
|
||||||
Environment variables can be used anywhere in the config file, simply surround
|
Global tags can be specified in the `[global_tags]` section of the config file
|
||||||
them with `${}`. Replacement occurs before file parsing. For strings
|
in key="value" format. All metrics being gathered on this host will be tagged
|
||||||
the variable must be within quotes, e.g., `"${STR_VAR}"`, for numbers and booleans
|
with the tags specified here.
|
||||||
they should be unquoted, e.g., `${INT_VAR}`, `${BOOL_VAR}`.
|
|
||||||
|
|
||||||
When using the `.deb` or `.rpm` packages, you can define environment variables
|
## Agent Configuration
|
||||||
in the `/etc/default/telegraf` file.
|
|
||||||
|
|
||||||
**Example**:
|
Telegraf has a few options you can configure under the `[agent]` section of the
|
||||||
|
config.
|
||||||
`/etc/default/telegraf`:
|
|
||||||
```
|
|
||||||
USER="alice"
|
|
||||||
INFLUX_URL="http://localhost:8086"
|
|
||||||
INFLUX_SKIP_DATABASE_CREATION="true"
|
|
||||||
INFLUX_PASSWORD="monkey123"
|
|
||||||
```
|
|
||||||
|
|
||||||
`/etc/telegraf.conf`:
|
|
||||||
```toml
|
|
||||||
[global_tags]
|
|
||||||
user = "${USER}"
|
|
||||||
|
|
||||||
[[inputs.mem]]
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = ["${INFLUX_URL}"]
|
|
||||||
skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
|
|
||||||
password = "${INFLUX_PASSWORD}"
|
|
||||||
```
|
|
||||||
|
|
||||||
The above files will produce the following effective configuration file to be
|
|
||||||
parsed:
|
|
||||||
```toml
|
|
||||||
[global_tags]
|
|
||||||
user = "alice"
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = "http://localhost:8086"
|
|
||||||
skip_database_creation = true
|
|
||||||
password = "monkey123"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Intervals
|
|
||||||
|
|
||||||
Intervals are durations of time and can be specified for supporting settings by
|
|
||||||
combining an integer value and time unit as a string value. Valid time units are
|
|
||||||
`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
|
|
||||||
```toml
|
|
||||||
[agent]
|
|
||||||
interval = "10s"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Global Tags
|
|
||||||
|
|
||||||
Global tags can be specified in the `[global_tags]` table in key="value"
|
|
||||||
format. All metrics that are gathered will be tagged with the tags specified.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[global_tags]
|
|
||||||
dc = "us-east-1"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Agent
|
|
||||||
|
|
||||||
The agent table configures Telegraf and the defaults used across all plugins.
|
|
||||||
|
|
||||||
- **interval**: Default data collection [interval][] for all inputs.
|
|
||||||
|
|
||||||
- **round_interval**: Rounds collection interval to [interval][]
|
|
||||||
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
|
||||||
|
|
||||||
- **metric_batch_size**:
|
|
||||||
Telegraf will send metrics to outputs in batches of at most
|
|
||||||
metric_batch_size metrics.
|
|
||||||
This controls the size of writes that Telegraf sends to output plugins.
|
|
||||||
|
|
||||||
- **metric_buffer_limit**:
|
|
||||||
Maximum number of unwritten metrics per output. Increasing this value
|
|
||||||
allows for longer periods of output downtime without dropping metrics at the
|
|
||||||
cost of higher maximum memory usage.
|
|
||||||
|
|
||||||
- **collection_jitter**:
|
|
||||||
Collection jitter is used to jitter the collection by a random [interval][].
|
|
||||||
Each plugin will sleep for a random time within jitter before collecting.
|
|
||||||
This can be used to avoid many plugins querying things like sysfs at the
|
|
||||||
same time, which can have a measurable effect on the system.
|
|
||||||
|
|
||||||
- **flush_interval**:
|
|
||||||
Default flushing [interval][] for all outputs. Maximum flush_interval will be
|
|
||||||
flush_interval + flush_jitter.
|
|
||||||
|
|
||||||
- **flush_jitter**:
|
|
||||||
Default flush jitter for all outputs. This jitters the flush [interval][]
|
|
||||||
by a random amount. This is primarily to avoid large write spikes for users
|
|
||||||
running a large number of telegraf instances. ie, a jitter of 5s and interval
|
|
||||||
10s means flushes will happen every 10-15s.
|
|
||||||
|
|
||||||
|
|
||||||
- **precision**:
|
|
||||||
Collected metrics are rounded to the precision specified as an [interval][].
|
|
||||||
|
|
||||||
|
* **interval**: Default data collection interval for all inputs
|
||||||
|
* **round_interval**: Rounds collection interval to 'interval'
|
||||||
|
ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||||
|
* **metric_batch_size**: Telegraf will send metrics to output in batch of at
|
||||||
|
most metric_batch_size metrics.
|
||||||
|
* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics
|
||||||
|
for each output, and will flush this buffer on a successful write.
|
||||||
|
This should be a multiple of metric_batch_size and could not be less
|
||||||
|
than 2 times metric_batch_size.
|
||||||
|
* **collection_jitter**: Collection jitter is used to jitter
|
||||||
|
the collection by a random amount.
|
||||||
|
Each plugin will sleep for a random time within jitter before collecting.
|
||||||
|
This can be used to avoid many plugins querying things like sysfs at the
|
||||||
|
same time, which can have a measurable effect on the system.
|
||||||
|
* **flush_interval**: Default data flushing interval for all outputs.
|
||||||
|
You should not set this below
|
||||||
|
interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||||
|
* **flush_jitter**: Jitter the flush interval by a random amount.
|
||||||
|
This is primarily to avoid
|
||||||
|
large write spikes for users running a large number of telegraf instances.
|
||||||
|
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
|
||||||
|
* **precision**:
|
||||||
|
By default or when set to "0s", precision will be set to the same
|
||||||
|
timestamp order as the collection interval, with the maximum being 1s.
|
||||||
Precision will NOT be used for service inputs. It is up to each individual
|
Precision will NOT be used for service inputs. It is up to each individual
|
||||||
service input to set the timestamp at the appropriate precision.
|
service input to set the timestamp at the appropriate precision.
|
||||||
|
Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
||||||
|
|
||||||
- **debug**:
|
* **logfile**: Specify the log file name. The empty string means to log to stderr.
|
||||||
Log at debug level.
|
* **debug**: Run telegraf in debug mode.
|
||||||
|
* **quiet**: Run telegraf in quiet mode (error messages only).
|
||||||
|
* **hostname**: Override default hostname, if empty use os.Hostname().
|
||||||
|
* **omit_hostname**: If true, do no set the "host" tag in the telegraf agent.
|
||||||
|
|
||||||
- **quiet**:
|
## Input Configuration
|
||||||
Log only error level messages.
|
|
||||||
|
|
||||||
- **logtarget**:
|
The following config parameters are available for all inputs:
|
||||||
Log target controls the destination for logs and can be one of "file",
|
|
||||||
"stderr" or, on Windows, "eventlog". When set to "file", the output file is
|
|
||||||
determined by the "logfile" setting.
|
|
||||||
|
|
||||||
- **logfile**:
|
* **interval**: How often to gather this metric. Normal plugins use a single
|
||||||
Name of the file to be logged to when using the "file" logtarget. If set to
|
global interval, but if one particular input should be run less or more often,
|
||||||
the empty string then logs are written to stderr.
|
you can configure that here.
|
||||||
|
* **name_override**: Override the base name of the measurement.
|
||||||
|
(Default is the name of the input).
|
||||||
|
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
||||||
|
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
||||||
|
* **tags**: A map of tags to apply to a specific input's measurements.
|
||||||
|
|
||||||
|
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||||
|
limit what metrics are emitted from the input plugin.
|
||||||
|
|
||||||
- **logfile_rotation_interval**:
|
## Output Configuration
|
||||||
The logfile will be rotated after the time interval specified. When set to
|
|
||||||
0 no time based rotation is performed.
|
|
||||||
|
|
||||||
- **logfile_rotation_max_size**:
|
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||||
The logfile will be rotated when it becomes larger than the specified size.
|
limit what metrics are emitted from the output plugin.
|
||||||
When set to 0 no size based rotation is performed.
|
|
||||||
|
|
||||||
- **logfile_rotation_max_archives**:
|
## Aggregator Configuration
|
||||||
Maximum number of rotated archives to keep, any older logs are deleted. If
|
|
||||||
set to -1, no archives are removed.
|
|
||||||
|
|
||||||
- **hostname**:
|
The following config parameters are available for all aggregators:
|
||||||
Override default hostname, if empty use os.Hostname()
|
|
||||||
- **omit_hostname**:
|
|
||||||
If set to true, do no set the "host" tag in the telegraf agent.
|
|
||||||
|
|
||||||
### Plugins
|
* **period**: The period on which to flush & clear each aggregator. All metrics
|
||||||
|
that are sent with timestamps outside of this period will be ignored by the
|
||||||
|
aggregator.
|
||||||
|
* **delay**: The delay before each aggregator is flushed. This is to control
|
||||||
|
how long for aggregators to wait before receiving metrics from input plugins,
|
||||||
|
in the case that aggregators are flushing and inputs are gathering on the
|
||||||
|
same interval.
|
||||||
|
* **drop_original**: If true, the original metric will be dropped by the
|
||||||
|
aggregator and will not get sent to the output plugins.
|
||||||
|
* **name_override**: Override the base name of the measurement.
|
||||||
|
(Default is the name of the input).
|
||||||
|
* **name_prefix**: Specifies a prefix to attach to the measurement name.
|
||||||
|
* **name_suffix**: Specifies a suffix to attach to the measurement name.
|
||||||
|
* **tags**: A map of tags to apply to a specific input's measurements.
|
||||||
|
|
||||||
Telegraf plugins are divided into 4 types: [inputs][], [outputs][],
|
The [measurement filtering](#measurement-filtering) parameters can be used to
|
||||||
[processors][], and [aggregators][].
|
limit what metrics are handled by the aggregator. Excluded metrics are passed
|
||||||
|
downstream to the next aggregator.
|
||||||
|
|
||||||
Unlike the `global_tags` and `agent` tables, any plugin can be defined
|
## Processor Configuration
|
||||||
multiple times and each instance will run independently. This allows you to
|
|
||||||
have plugins defined with differing configurations as needed within a single
|
|
||||||
Telegraf process.
|
|
||||||
|
|
||||||
Each plugin has a unique set of configuration options, reference the
|
The following config parameters are available for all processors:
|
||||||
sample configuration for details. Additionally, several options are available
|
|
||||||
on any plugin depending on its type.
|
|
||||||
|
|
||||||
### Input Plugins
|
* **order**: This is the order in which the processor(s) get executed. If this
|
||||||
|
is not specified then processor execution order will be random.
|
||||||
|
|
||||||
Input plugins gather and create metrics. They support both polling and event
|
The [measurement filtering](#measurement-filtering) parameters can be used
|
||||||
driven operation.
|
to limit what metrics are handled by the processor. Excluded metrics are
|
||||||
|
passed downstream to the next processor.
|
||||||
|
|
||||||
Parameters that can be used with any input plugin:
|
#### Measurement Filtering
|
||||||
|
|
||||||
- **alias**: Name an instance of a plugin.
|
Filters can be configured per input, output, processor, or aggregator,
|
||||||
- **interval**: How often to gather this metric. Normal plugins use a single
|
see below for examples.
|
||||||
global interval, but if one particular input should be run less or more
|
|
||||||
often, you can configure that here.
|
|
||||||
- **name_override**: Override the base name of the measurement. (Default is
|
|
||||||
the name of the input).
|
|
||||||
- **name_prefix**: Specifies a prefix to attach to the measurement name.
|
|
||||||
- **name_suffix**: Specifies a suffix to attach to the measurement name.
|
|
||||||
- **tags**: A map of tags to apply to a specific input's measurements.
|
|
||||||
|
|
||||||
The [metric filtering][] parameters can be used to limit what metrics are
|
* **namepass**:
|
||||||
emitted from the input plugin.
|
An array of glob pattern strings. Only points whose measurement name matches
|
||||||
|
a pattern in this list are emitted.
|
||||||
|
* **namedrop**:
|
||||||
|
The inverse of `namepass`. If a match is found the point is discarded. This
|
||||||
|
is tested on points after they have passed the `namepass` test.
|
||||||
|
* **fieldpass**:
|
||||||
|
An array of glob pattern strings. Only fields whose field key matches a
|
||||||
|
pattern in this list are emitted. Not available for outputs.
|
||||||
|
* **fielddrop**:
|
||||||
|
The inverse of `fieldpass`. Fields with a field key matching one of the
|
||||||
|
patterns will be discarded from the point. This is tested on points after
|
||||||
|
they have passed the `fieldpass` test. Not available for outputs.
|
||||||
|
* **tagpass**:
|
||||||
|
A table mapping tag keys to arrays of glob pattern strings. Only points
|
||||||
|
that contain a tag key in the table and a tag value matching one of its
|
||||||
|
patterns is emitted.
|
||||||
|
* **tagdrop**:
|
||||||
|
The inverse of `tagpass`. If a match is found the point is discarded. This
|
||||||
|
is tested on points after they have passed the `tagpass` test.
|
||||||
|
* **taginclude**:
|
||||||
|
An array of glob pattern strings. Only tags with a tag key matching one of
|
||||||
|
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
|
||||||
|
point based on its tag, `taginclude` removes all non matching tags from the
|
||||||
|
point. This filter can be used on both inputs & outputs, but it is
|
||||||
|
_recommended_ to be used on inputs, as it is more efficient to filter out tags
|
||||||
|
at the ingestion point.
|
||||||
|
* **tagexclude**:
|
||||||
|
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
|
||||||
|
will be discarded from the point.
|
||||||
|
|
||||||
#### Examples
|
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
|
||||||
|
must be defined at the _end_ of the plugin definition, otherwise subsequent
|
||||||
|
plugin config options will be interpreted as part of the tagpass/tagdrop
|
||||||
|
tables.
|
||||||
|
|
||||||
|
#### Input Configuration Examples
|
||||||
|
|
||||||
|
This is a full working config that will output CPU data to an InfluxDB instance
|
||||||
|
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
|
||||||
|
measurements at a 10s interval and will collect per-cpu data, dropping any
|
||||||
|
fields which begin with `time_`.
|
||||||
|
|
||||||
Use the name_suffix parameter to emit measurements with the name `cpu_total`:
|
|
||||||
```toml
|
```toml
|
||||||
[[inputs.cpu]]
|
[global_tags]
|
||||||
name_suffix = "_total"
|
dc = "denver-1"
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
```
|
|
||||||
|
|
||||||
Use the name_override parameter to emit measurements with the name `foobar`:
|
[agent]
|
||||||
```toml
|
interval = "10s"
|
||||||
[[inputs.cpu]]
|
|
||||||
name_override = "foobar"
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
```
|
|
||||||
|
|
||||||
Emit measurements with two additional tags: `tag1=foo` and `tag2=bar`
|
# OUTPUTS
|
||||||
|
[[outputs.influxdb]]
|
||||||
> **NOTE**: With TOML, order matters. Parameters belong to the last defined
|
url = "http://192.168.59.103:8086" # required.
|
||||||
> table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin
|
database = "telegraf" # required.
|
||||||
> definition.
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
[inputs.cpu.tags]
|
|
||||||
tag1 = "foo"
|
|
||||||
tag2 = "bar"
|
|
||||||
```
|
|
||||||
|
|
||||||
Utilize `name_override`, `name_prefix`, or `name_suffix` config options to
|
|
||||||
avoid measurement collisions when defining multiple plugins:
|
|
||||||
```toml
|
|
||||||
[[inputs.cpu]]
|
|
||||||
percpu = false
|
|
||||||
totalcpu = true
|
|
||||||
|
|
||||||
|
# INPUTS
|
||||||
[[inputs.cpu]]
|
[[inputs.cpu]]
|
||||||
percpu = true
|
percpu = true
|
||||||
totalcpu = false
|
totalcpu = false
|
||||||
name_override = "percpu_usage"
|
# filter all fields beginning with 'time_'
|
||||||
fielddrop = ["cpu_time*"]
|
fielddrop = ["time_*"]
|
||||||
```
|
```
|
||||||
|
|
||||||
### Output Plugins
|
#### Input Config: tagpass and tagdrop
|
||||||
|
|
||||||
Output plugins write metrics to a location. Outputs commonly write to
|
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
|
||||||
databases, network services, and messaging systems.
|
the plugin definition, otherwise subsequent plugin config options will be
|
||||||
|
interpreted as part of the tagpass/tagdrop map.
|
||||||
|
|
||||||
Parameters that can be used with any output plugin:
|
|
||||||
|
|
||||||
- **alias**: Name an instance of a plugin.
|
|
||||||
- **flush_interval**: The maximum time between flushes. Use this setting to
|
|
||||||
override the agent `flush_interval` on a per plugin basis.
|
|
||||||
- **flush_jitter**: The amount of time to jitter the flush interval. Use this
|
|
||||||
setting to override the agent `flush_jitter` on a per plugin basis.
|
|
||||||
- **metric_batch_size**: The maximum number of metrics to send at once. Use
|
|
||||||
this setting to override the agent `metric_batch_size` on a per plugin basis.
|
|
||||||
- **metric_buffer_limit**: The maximum number of unsent metrics to buffer.
|
|
||||||
Use this setting to override the agent `metric_buffer_limit` on a per plugin
|
|
||||||
basis.
|
|
||||||
- **name_override**: Override the original name of the measurement.
|
|
||||||
- **name_prefix**: Specifies a prefix to attach to the measurement name.
|
|
||||||
- **name_suffix**: Specifies a suffix to attach to the measurement name.
|
|
||||||
|
|
||||||
The [metric filtering][] parameters can be used to limit what metrics are
|
|
||||||
emitted from the output plugin.
|
|
||||||
|
|
||||||
#### Examples
|
|
||||||
|
|
||||||
Override flush parameters for a single output:
|
|
||||||
```toml
|
|
||||||
[agent]
|
|
||||||
flush_interval = "10s"
|
|
||||||
flush_jitter = "5s"
|
|
||||||
metric_batch_size = 1000
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
urls = [ "http://example.org:8086" ]
|
|
||||||
database = "telegraf"
|
|
||||||
|
|
||||||
[[outputs.file]]
|
|
||||||
files = [ "stdout" ]
|
|
||||||
flush_interval = "1s"
|
|
||||||
flush_jitter = "1s"
|
|
||||||
metric_batch_size = 10
|
|
||||||
```
|
|
||||||
|
|
||||||
### Processor Plugins
|
|
||||||
|
|
||||||
Processor plugins perform processing tasks on metrics and are commonly used to
|
|
||||||
rename or apply transformations to metrics. Processors are applied after the
|
|
||||||
input plugins and before any aggregator plugins.
|
|
||||||
|
|
||||||
Parameters that can be used with any processor plugin:
|
|
||||||
|
|
||||||
- **alias**: Name an instance of a plugin.
|
|
||||||
- **order**: The order in which the processor(s) are executed. If this is not
|
|
||||||
specified then processor execution order will be random.
|
|
||||||
|
|
||||||
The [metric filtering][] parameters can be used to limit what metrics are
|
|
||||||
handled by the processor. Excluded metrics are passed downstream to the next
|
|
||||||
processor.
|
|
||||||
|
|
||||||
#### Examples
|
|
||||||
|
|
||||||
If the order processors are applied matters you must set order on all involved
|
|
||||||
processors:
|
|
||||||
```toml
|
|
||||||
[[processors.rename]]
|
|
||||||
order = 1
|
|
||||||
[[processors.rename.replace]]
|
|
||||||
tag = "path"
|
|
||||||
dest = "resource"
|
|
||||||
|
|
||||||
[[processors.strings]]
|
|
||||||
order = 2
|
|
||||||
[[processors.strings.trim_prefix]]
|
|
||||||
tag = "resource"
|
|
||||||
prefix = "/api/"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Aggregator Plugins
|
|
||||||
|
|
||||||
Aggregator plugins produce new metrics after examining metrics over a time
|
|
||||||
period, as the name suggests they are commonly used to produce new aggregates
|
|
||||||
such as mean/max/min metrics. Aggregators operate on metrics after any
|
|
||||||
processors have been applied.
|
|
||||||
|
|
||||||
Parameters that can be used with any aggregator plugin:
|
|
||||||
|
|
||||||
- **alias**: Name an instance of a plugin.
|
|
||||||
- **period**: The period on which to flush & clear each aggregator. All
|
|
||||||
metrics that are sent with timestamps outside of this period will be ignored
|
|
||||||
by the aggregator.
|
|
||||||
- **delay**: The delay before each aggregator is flushed. This is to control
|
|
||||||
how long for aggregators to wait before receiving metrics from input
|
|
||||||
plugins, in the case that aggregators are flushing and inputs are gathering
|
|
||||||
on the same interval.
|
|
||||||
- **grace**: The duration when the metrics will still be aggregated
|
|
||||||
by the plugin, even though they're outside of the aggregation period. This
|
|
||||||
is needed in a situation when the agent is expected to receive late metrics
|
|
||||||
and it's acceptable to roll them up into next aggregation period.
|
|
||||||
- **drop_original**: If true, the original metric will be dropped by the
|
|
||||||
aggregator and will not get sent to the output plugins.
|
|
||||||
- **name_override**: Override the base name of the measurement. (Default is
|
|
||||||
the name of the input).
|
|
||||||
- **name_prefix**: Specifies a prefix to attach to the measurement name.
|
|
||||||
- **name_suffix**: Specifies a suffix to attach to the measurement name.
|
|
||||||
- **tags**: A map of tags to apply to a specific input's measurements.
|
|
||||||
|
|
||||||
The [metric filtering][] parameters can be used to limit what metrics are
|
|
||||||
handled by the aggregator. Excluded metrics are passed downstream to the next
|
|
||||||
aggregator.
|
|
||||||
|
|
||||||
#### Examples
|
|
||||||
|
|
||||||
Collect and emit the min/max of the system load1 metric every 30s, dropping
|
|
||||||
the originals.
|
|
||||||
```toml
|
|
||||||
[[inputs.system]]
|
|
||||||
fieldpass = ["load1"] # collects system load1 metric.
|
|
||||||
|
|
||||||
[[aggregators.minmax]]
|
|
||||||
period = "30s" # send & clear the aggregate every 30s.
|
|
||||||
drop_original = true # drop the original metrics.
|
|
||||||
|
|
||||||
[[outputs.file]]
|
|
||||||
files = ["stdout"]
|
|
||||||
```
|
|
||||||
|
|
||||||
Collect and emit the min/max of the swap metrics every 30s, dropping the
|
|
||||||
originals. The aggregator will not be applied to the system load metrics due
|
|
||||||
to the `namepass` parameter.
|
|
||||||
```toml
|
|
||||||
[[inputs.swap]]
|
|
||||||
|
|
||||||
[[inputs.system]]
|
|
||||||
fieldpass = ["load1"] # collects system load1 metric.
|
|
||||||
|
|
||||||
[[aggregators.minmax]]
|
|
||||||
period = "30s" # send & clear the aggregate every 30s.
|
|
||||||
drop_original = true # drop the original metrics.
|
|
||||||
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
|
|
||||||
|
|
||||||
[[outputs.file]]
|
|
||||||
files = ["stdout"]
|
|
||||||
```
|
|
||||||
|
|
||||||
<a id="measurement-filtering"></a>
|
|
||||||
### Metric Filtering
|
|
||||||
|
|
||||||
Metric filtering can be configured per plugin on any input, output, processor,
|
|
||||||
and aggregator plugin. Filters fall under two categories: Selectors and
|
|
||||||
Modifiers.
|
|
||||||
|
|
||||||
#### Selectors
|
|
||||||
|
|
||||||
Selector filters include or exclude entire metrics. When a metric is excluded
|
|
||||||
from an Input or an Output plugin, the metric is dropped. If a metric is
|
|
||||||
excluded from a Processor or Aggregator plugin, it skips the plugin and is
|
|
||||||
sent onwards to the next stage of processing.
|
|
||||||
|
|
||||||
- **namepass**:
|
|
||||||
An array of glob pattern strings. Only metrics whose measurement name matches
|
|
||||||
a pattern in this list are emitted.
|
|
||||||
|
|
||||||
- **namedrop**:
|
|
||||||
The inverse of `namepass`. If a match is found the metric is discarded. This
|
|
||||||
is tested on metrics after they have passed the `namepass` test.
|
|
||||||
|
|
||||||
- **tagpass**:
|
|
||||||
A table mapping tag keys to arrays of glob pattern strings. Only metrics
|
|
||||||
that contain a tag key in the table and a tag value matching one of its
|
|
||||||
patterns is emitted.
|
|
||||||
|
|
||||||
- **tagdrop**:
|
|
||||||
The inverse of `tagpass`. If a match is found the metric is discarded. This
|
|
||||||
is tested on metrics after they have passed the `tagpass` test.
|
|
||||||
|
|
||||||
#### Modifiers
|
|
||||||
|
|
||||||
Modifier filters remove tags and fields from a metric. If all fields are
|
|
||||||
removed the metric is removed.
|
|
||||||
|
|
||||||
- **fieldpass**:
|
|
||||||
An array of glob pattern strings. Only fields whose field key matches a
|
|
||||||
pattern in this list are emitted.
|
|
||||||
|
|
||||||
- **fielddrop**:
|
|
||||||
The inverse of `fieldpass`. Fields with a field key matching one of the
|
|
||||||
patterns will be discarded from the metric. This is tested on metrics after
|
|
||||||
they have passed the `fieldpass` test.
|
|
||||||
|
|
||||||
- **taginclude**:
|
|
||||||
An array of glob pattern strings. Only tags with a tag key matching one of
|
|
||||||
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
|
|
||||||
metric based on its tag, `taginclude` removes all non matching tags from the
|
|
||||||
metric. Any tag can be filtered including global tags and the agent `host`
|
|
||||||
tag.
|
|
||||||
|
|
||||||
- **tagexclude**:
|
|
||||||
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
|
|
||||||
will be discarded from the metric. Any tag can be filtered including global
|
|
||||||
tags and the agent `host` tag.
|
|
||||||
|
|
||||||
#### Filtering Examples
|
|
||||||
|
|
||||||
##### Using tagpass and tagdrop:
|
|
||||||
```toml
|
```toml
|
||||||
[[inputs.cpu]]
|
[[inputs.cpu]]
|
||||||
percpu = true
|
percpu = true
|
||||||
|
@ -474,22 +231,10 @@ tags and the agent `host` tag.
|
||||||
fstype = [ "ext4", "xfs" ]
|
fstype = [ "ext4", "xfs" ]
|
||||||
# Globs can also be used on the tag values
|
# Globs can also be used on the tag values
|
||||||
path = [ "/opt", "/home*" ]
|
path = [ "/opt", "/home*" ]
|
||||||
|
|
||||||
[[inputs.win_perf_counters]]
|
|
||||||
[[inputs.win_perf_counters.object]]
|
|
||||||
ObjectName = "Network Interface"
|
|
||||||
Instances = ["*"]
|
|
||||||
Counters = [
|
|
||||||
"Bytes Received/sec",
|
|
||||||
"Bytes Sent/sec"
|
|
||||||
]
|
|
||||||
Measurement = "win_net"
|
|
||||||
# Don't send metrics where the Windows interface name (instance) begins with isatap or Local
|
|
||||||
[inputs.win_perf_counters.tagdrop]
|
|
||||||
instance = ["isatap*", "Local*"]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Using fieldpass and fielddrop:
|
#### Input Config: fieldpass and fielddrop
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
# Drop all metrics for guest & steal CPU usage
|
# Drop all metrics for guest & steal CPU usage
|
||||||
[[inputs.cpu]]
|
[[inputs.cpu]]
|
||||||
|
@ -502,7 +247,8 @@ tags and the agent `host` tag.
|
||||||
fieldpass = ["inodes*"]
|
fieldpass = ["inodes*"]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Using namepass and namedrop:
|
#### Input Config: namepass and namedrop
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
# Drop all metrics about containers for kubelet
|
# Drop all metrics about containers for kubelet
|
||||||
[[inputs.prometheus]]
|
[[inputs.prometheus]]
|
||||||
|
@ -515,7 +261,8 @@ tags and the agent `host` tag.
|
||||||
namepass = ["rest_client_*"]
|
namepass = ["rest_client_*"]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Using taginclude and tagexclude:
|
#### Input Config: taginclude and tagexclude
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
# Only include the "cpu" tag in the measurements for the cpu plugin.
|
# Only include the "cpu" tag in the measurements for the cpu plugin.
|
||||||
[[inputs.cpu]]
|
[[inputs.cpu]]
|
||||||
|
@ -528,7 +275,64 @@ tags and the agent `host` tag.
|
||||||
tagexclude = ["fstype"]
|
tagexclude = ["fstype"]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Metrics can be routed to different outputs using the metric name and tags:
|
#### Input config: prefix, suffix, and override
|
||||||
|
|
||||||
|
This plugin will emit measurements with the name `cpu_total`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.cpu]]
|
||||||
|
name_suffix = "_total"
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
```
|
||||||
|
|
||||||
|
This will emit measurements with the name `foobar`
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.cpu]]
|
||||||
|
name_override = "foobar"
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Input config: tags
|
||||||
|
|
||||||
|
This plugin will emit measurements with two additional tags: `tag1=foo` and
|
||||||
|
`tag2=bar`
|
||||||
|
|
||||||
|
NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
|
||||||
|
plugin definition.
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.cpu]]
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
[inputs.cpu.tags]
|
||||||
|
tag1 = "foo"
|
||||||
|
tag2 = "bar"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multiple inputs of the same type
|
||||||
|
|
||||||
|
Additional inputs (or outputs) of the same type can be specified,
|
||||||
|
just define more instances in the config file. It is highly recommended that
|
||||||
|
you utilize `name_override`, `name_prefix`, or `name_suffix` config options
|
||||||
|
to avoid measurement collisions:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.cpu]]
|
||||||
|
percpu = false
|
||||||
|
totalcpu = true
|
||||||
|
|
||||||
|
[[inputs.cpu]]
|
||||||
|
percpu = true
|
||||||
|
totalcpu = false
|
||||||
|
name_override = "percpu_usage"
|
||||||
|
fielddrop = ["cpu_time*"]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Output Configuration Examples:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
urls = [ "http://localhost:8086" ]
|
urls = [ "http://localhost:8086" ]
|
||||||
|
@ -550,43 +354,50 @@ tags and the agent `host` tag.
|
||||||
cpu = ["cpu0"]
|
cpu = ["cpu0"]
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Routing metrics to different outputs based on the input.
|
#### Aggregator Configuration Examples:
|
||||||
|
|
||||||
Metrics are tagged with `influxdb_database` in the input, which is then used to
|
This will collect and emit the min/max of the system load1 metric every
|
||||||
select the output. The tag is removed in the outputs before writing.
|
30s, dropping the originals.
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[outputs.influxdb]]
|
[[inputs.system]]
|
||||||
urls = ["http://influxdb.example.com"]
|
fieldpass = ["load1"] # collects system load1 metric.
|
||||||
database = "db_default"
|
|
||||||
[outputs.influxdb.tagdrop]
|
|
||||||
influxdb_database = ["*"]
|
|
||||||
|
|
||||||
[[outputs.influxdb]]
|
[[aggregators.minmax]]
|
||||||
urls = ["http://influxdb.example.com"]
|
period = "30s" # send & clear the aggregate every 30s.
|
||||||
database = "db_other"
|
drop_original = true # drop the original metrics.
|
||||||
tagexclude = ["influxdb_database"]
|
|
||||||
[outputs.influxdb.tagpass]
|
|
||||||
influxdb_database = ["other"]
|
|
||||||
|
|
||||||
[[inputs.disk]]
|
[[outputs.file]]
|
||||||
[inputs.disk.tags]
|
files = ["stdout"]
|
||||||
influxdb_database = "other"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Transport Layer Security (TLS)
|
This will collect and emit the min/max of the swap metrics every
|
||||||
|
30s, dropping the originals. The aggregator will not be applied
|
||||||
|
to the system load metrics due to the `namepass` parameter.
|
||||||
|
|
||||||
Reference the detailed [TLS][] documentation.
|
```toml
|
||||||
|
[[inputs.swap]]
|
||||||
|
|
||||||
[TOML]: https://github.com/toml-lang/toml#toml
|
[[inputs.system]]
|
||||||
[global tags]: #global-tags
|
fieldpass = ["load1"] # collects system load1 metric.
|
||||||
[interval]: #intervals
|
|
||||||
[agent]: #agent
|
[[aggregators.minmax]]
|
||||||
[plugins]: #plugins
|
period = "30s" # send & clear the aggregate every 30s.
|
||||||
[inputs]: #input-plugins
|
drop_original = true # drop the original metrics.
|
||||||
[outputs]: #output-plugins
|
namepass = ["swap"] # only "pass" swap metrics through the aggregator.
|
||||||
[processors]: #processor-plugins
|
|
||||||
[aggregators]: #aggregator-plugins
|
[[outputs.file]]
|
||||||
[metric filtering]: #metric-filtering
|
files = ["stdout"]
|
||||||
[telegraf.conf]: /etc/telegraf.conf
|
```
|
||||||
[TLS]: /docs/TLS.md
|
|
||||||
|
#### Processor Configuration Examples:
|
||||||
|
|
||||||
|
Print only the metrics with `cpu` as the measurement name, all metrics are
|
||||||
|
passed to the output:
|
||||||
|
```toml
|
||||||
|
[[processors.printer]]
|
||||||
|
namepass = "cpu"
|
||||||
|
|
||||||
|
[[outputs.file]]
|
||||||
|
files = ["/tmp/metrics.out"]
|
||||||
|
```
|
||||||
|
|
|
@ -1,24 +1,37 @@
|
||||||
# Input Data Formats
|
# Telegraf Input Data Formats
|
||||||
|
|
||||||
Telegraf contains many general purpose plugins that support parsing input data
|
Telegraf is able to parse the following input data formats into metrics:
|
||||||
using a configurable parser into [metrics][]. This allows, for example, the
|
|
||||||
`kafka_consumer` input plugin to process messages in either InfluxDB Line
|
|
||||||
Protocol or in JSON format.
|
|
||||||
|
|
||||||
- [InfluxDB Line Protocol](/plugins/parsers/influx)
|
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx)
|
||||||
- [Collectd](/plugins/parsers/collectd)
|
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json)
|
||||||
- [CSV](/plugins/parsers/csv)
|
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
|
||||||
- [Dropwizard](/plugins/parsers/dropwizard)
|
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
|
||||||
- [Graphite](/plugins/parsers/graphite)
|
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
|
||||||
- [Grok](/plugins/parsers/grok)
|
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
|
||||||
- [JSON](/plugins/parsers/json)
|
1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard)
|
||||||
- [Logfmt](/plugins/parsers/logfmt)
|
|
||||||
- [Nagios](/plugins/parsers/nagios)
|
|
||||||
- [Value](/plugins/parsers/value), ie: 45 or "booyah"
|
|
||||||
- [Wavefront](/plugins/parsers/wavefront)
|
|
||||||
|
|
||||||
Any input plugin containing the `data_format` option can use it to select the
|
Telegraf metrics, like InfluxDB
|
||||||
desired parser:
|
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
||||||
|
are a combination of four basic parts:
|
||||||
|
|
||||||
|
1. Measurement Name
|
||||||
|
1. Tags
|
||||||
|
1. Fields
|
||||||
|
1. Timestamp
|
||||||
|
|
||||||
|
These four parts are easily defined when using InfluxDB line-protocol as a
|
||||||
|
data format. But there are other data formats that users may want to use which
|
||||||
|
require more advanced configuration to create usable Telegraf metrics.
|
||||||
|
|
||||||
|
Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now,
|
||||||
|
these plugins were statically configured to parse just a single
|
||||||
|
data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only
|
||||||
|
supported data in InfluxDB line-protocol.
|
||||||
|
|
||||||
|
But now we are normalizing the parsing of various data formats across all
|
||||||
|
plugins that can support it. You will be able to identify a plugin that supports
|
||||||
|
different data formats by the presence of a `data_format` config option, for
|
||||||
|
example, in the exec plugin:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[inputs.exec]]
|
[[inputs.exec]]
|
||||||
|
@ -33,6 +46,610 @@ desired parser:
|
||||||
## more about them here:
|
## more about them here:
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
data_format = "json"
|
data_format = "json"
|
||||||
|
|
||||||
|
## Additional configuration options go here
|
||||||
```
|
```
|
||||||
|
|
||||||
[metrics]: /docs/METRICS.md
|
Each data_format has an additional set of configuration options available, which
|
||||||
|
I'll go over below.
|
||||||
|
|
||||||
|
# Influx:
|
||||||
|
|
||||||
|
There are no additional configuration options for InfluxDB line-protocol. The
|
||||||
|
metrics are parsed directly into Telegraf metrics.
|
||||||
|
|
||||||
|
#### Influx Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||||
|
|
||||||
|
## measurement name suffix (for separating different commands)
|
||||||
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "influx"
|
||||||
|
```
|
||||||
|
|
||||||
|
# JSON:
|
||||||
|
|
||||||
|
The JSON data format flattens JSON into metric _fields_.
|
||||||
|
NOTE: Only numerical values are converted to fields, and they are converted
|
||||||
|
into a float. Strings are ignored unless specified as a tag_key (see below).
|
||||||
|
|
||||||
|
So for example, this JSON:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"a": 5,
|
||||||
|
"b": {
|
||||||
|
"c": 6
|
||||||
|
},
|
||||||
|
"ignored": "I'm a string"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Would get translated into _fields_ of a measurement:
|
||||||
|
|
||||||
|
```
|
||||||
|
myjsonmetric a=5,b_c=6
|
||||||
|
```
|
||||||
|
|
||||||
|
The _measurement_ _name_ is usually the name of the plugin,
|
||||||
|
but can be overridden using the `name_override` config option.
|
||||||
|
|
||||||
|
#### JSON Configuration:
|
||||||
|
|
||||||
|
The JSON data format supports specifying "tag keys". If specified, keys
|
||||||
|
will be searched for in the root-level of the JSON blob. If the key(s) exist,
|
||||||
|
they will be applied as tags to the Telegraf metrics.
|
||||||
|
|
||||||
|
For example, if you had this configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||||
|
|
||||||
|
## measurement name suffix (for separating different commands)
|
||||||
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "json"
|
||||||
|
|
||||||
|
## List of tag names to extract from top-level of JSON server response
|
||||||
|
tag_keys = [
|
||||||
|
"my_tag_1",
|
||||||
|
"my_tag_2"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
with this JSON output from a command:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"a": 5,
|
||||||
|
"b": {
|
||||||
|
"c": 6
|
||||||
|
},
|
||||||
|
"my_tag_1": "foo"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Your Telegraf metrics would get tagged with "my_tag_1"
|
||||||
|
|
||||||
|
```
|
||||||
|
exec_mycollector,my_tag_1=foo a=5,b_c=6
|
||||||
|
```
|
||||||
|
|
||||||
|
If the JSON data is an array, then each element of the array is parsed with the configured settings.
|
||||||
|
Each resulting metric will be output with the same timestamp.
|
||||||
|
|
||||||
|
For example, if the following configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["/usr/bin/mycollector --foo=bar"]
|
||||||
|
|
||||||
|
## measurement name suffix (for separating different commands)
|
||||||
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "json"
|
||||||
|
|
||||||
|
## List of tag names to extract from top-level of JSON server response
|
||||||
|
tag_keys = [
|
||||||
|
"my_tag_1",
|
||||||
|
"my_tag_2"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
with this JSON output from a command:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"a": 5,
|
||||||
|
"b": {
|
||||||
|
"c": 6
|
||||||
|
},
|
||||||
|
"my_tag_1": "foo",
|
||||||
|
"my_tag_2": "baz"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"a": 7,
|
||||||
|
"b": {
|
||||||
|
"c": 8
|
||||||
|
},
|
||||||
|
"my_tag_1": "bar",
|
||||||
|
"my_tag_2": "baz"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2"
|
||||||
|
|
||||||
|
```
|
||||||
|
exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6
|
||||||
|
exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8
|
||||||
|
```
|
||||||
|
|
||||||
|
# Value:
|
||||||
|
|
||||||
|
The "value" data format translates single values into Telegraf metrics. This
|
||||||
|
is done by assigning a measurement name and setting a single field ("value")
|
||||||
|
as the parsed metric.
|
||||||
|
|
||||||
|
#### Value Configuration:
|
||||||
|
|
||||||
|
You **must** tell Telegraf what type of metric to collect by using the
|
||||||
|
`data_type` configuration option. Available options are:
|
||||||
|
|
||||||
|
1. integer
|
||||||
|
2. float or long
|
||||||
|
3. string
|
||||||
|
4. boolean
|
||||||
|
|
||||||
|
**Note:** It is also recommended that you set `name_override` to a measurement
|
||||||
|
name that makes sense for your metric, otherwise it will just be set to the
|
||||||
|
name of the plugin.
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["cat /proc/sys/kernel/random/entropy_avail"]
|
||||||
|
|
||||||
|
## override the default metric name of "exec"
|
||||||
|
name_override = "entropy_available"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "value"
|
||||||
|
data_type = "integer" # required
|
||||||
|
```
|
||||||
|
|
||||||
|
# Graphite:
|
||||||
|
|
||||||
|
The Graphite data format translates graphite _dot_ buckets directly into
|
||||||
|
telegraf measurement names, with a single value field, and without any tags.
|
||||||
|
By default, the separator is left as ".", but this can be changed using the
|
||||||
|
"separator" argument. For more advanced options,
|
||||||
|
Telegraf supports specifying "templates" to translate
|
||||||
|
graphite buckets into Telegraf metrics.
|
||||||
|
|
||||||
|
Templates are of the form:
|
||||||
|
|
||||||
|
```
|
||||||
|
"host.mytag.mytag.measurement.measurement.field*"
|
||||||
|
```
|
||||||
|
|
||||||
|
Where the following keywords exist:
|
||||||
|
|
||||||
|
1. `measurement`: specifies that this section of the graphite bucket corresponds
|
||||||
|
to the measurement name. This can be specified multiple times.
|
||||||
|
2. `field`: specifies that this section of the graphite bucket corresponds
|
||||||
|
to the field name. This can be specified multiple times.
|
||||||
|
3. `measurement*`: specifies that all remaining elements of the graphite bucket
|
||||||
|
correspond to the measurement name.
|
||||||
|
4. `field*`: specifies that all remaining elements of the graphite bucket
|
||||||
|
correspond to the field name.
|
||||||
|
|
||||||
|
Any part of the template that is not a keyword is treated as a tag key. This
|
||||||
|
can also be specified multiple times.
|
||||||
|
|
||||||
|
NOTE: `field*` cannot be used in conjunction with `measurement*`!
|
||||||
|
|
||||||
|
#### Measurement & Tag Templates:
|
||||||
|
|
||||||
|
The most basic template is to specify a single transformation to apply to all
|
||||||
|
incoming metrics. So the following template:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
templates = [
|
||||||
|
"region.region.measurement*"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
|
```
|
||||||
|
us.west.cpu.load 100
|
||||||
|
=> cpu.load,region=us.west value=100
|
||||||
|
```
|
||||||
|
|
||||||
|
Multiple templates can also be specified, but these should be differentiated
|
||||||
|
using _filters_ (see below for more details)
|
||||||
|
|
||||||
|
```toml
|
||||||
|
templates = [
|
||||||
|
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
|
||||||
|
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Field Templates:
|
||||||
|
|
||||||
|
The field keyword tells Telegraf to give the metric that field name.
|
||||||
|
So the following template:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
separator = "_"
|
||||||
|
templates = [
|
||||||
|
"measurement.measurement.field.field.region"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
|
```
|
||||||
|
cpu.usage.idle.percent.eu-east 100
|
||||||
|
=> cpu_usage,region=eu-east idle_percent=100
|
||||||
|
```
|
||||||
|
|
||||||
|
The field key can also be derived from all remaining elements of the graphite
|
||||||
|
bucket by specifying `field*`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
separator = "_"
|
||||||
|
templates = [
|
||||||
|
"measurement.measurement.region.field*"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
which would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
|
```
|
||||||
|
cpu.usage.eu-east.idle.percentage 100
|
||||||
|
=> cpu_usage,region=eu-east idle_percentage=100
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Filter Templates:
|
||||||
|
|
||||||
|
Users can also filter the template(s) to use based on the name of the bucket,
|
||||||
|
using glob matching, like so:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
templates = [
|
||||||
|
"cpu.* measurement.measurement.region",
|
||||||
|
"mem.* measurement.measurement.host"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
which would result in the following transformation:
|
||||||
|
|
||||||
|
```
|
||||||
|
cpu.load.eu-east 100
|
||||||
|
=> cpu_load,region=eu-east value=100
|
||||||
|
|
||||||
|
mem.cached.localhost 256
|
||||||
|
=> mem_cached,host=localhost value=256
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Adding Tags:
|
||||||
|
|
||||||
|
Additional tags can be added to a metric that don't exist on the received metric.
|
||||||
|
You can add additional tags by specifying them after the pattern.
|
||||||
|
Tags have the same format as the line protocol.
|
||||||
|
Multiple tags are separated by commas.
|
||||||
|
|
||||||
|
```toml
|
||||||
|
templates = [
|
||||||
|
"measurement.measurement.field.region datacenter=1a"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
would result in the following Graphite -> Telegraf transformation.
|
||||||
|
|
||||||
|
```
|
||||||
|
cpu.usage.idle.eu-east 100
|
||||||
|
=> cpu_usage,region=eu-east,datacenter=1a idle=100
|
||||||
|
```
|
||||||
|
|
||||||
|
There are many more options available,
|
||||||
|
[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates)
|
||||||
|
|
||||||
|
#### Graphite Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
|
||||||
|
|
||||||
|
## measurement name suffix (for separating different commands)
|
||||||
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "graphite"
|
||||||
|
|
||||||
|
## This string will be used to join the matched values.
|
||||||
|
separator = "_"
|
||||||
|
|
||||||
|
## Each template line requires a template pattern. It can have an optional
|
||||||
|
## filter before the template and separated by spaces. It can also have optional extra
|
||||||
|
## tags following the template. Multiple tags should be separated by commas and no spaces
|
||||||
|
## similar to the line protocol format. There can be only one default template.
|
||||||
|
## Templates support below format:
|
||||||
|
## 1. filter + template
|
||||||
|
## 2. filter + template + extra tag(s)
|
||||||
|
## 3. filter + template with field key
|
||||||
|
## 4. default template
|
||||||
|
templates = [
|
||||||
|
"*.app env.service.resource.measurement",
|
||||||
|
"stats.* .host.measurement* region=eu-east,agent=sensu",
|
||||||
|
"stats2.* .host.measurement.field",
|
||||||
|
"measurement*"
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
# Nagios:
|
||||||
|
|
||||||
|
There are no additional configuration options for Nagios line-protocol. The
|
||||||
|
metrics are parsed directly into Telegraf metrics.
|
||||||
|
|
||||||
|
Note: Nagios Input Data Formats is only supported in `exec` input plugin.
|
||||||
|
|
||||||
|
#### Nagios Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
|
||||||
|
|
||||||
|
## measurement name suffix (for separating different commands)
|
||||||
|
name_suffix = "_mycollector"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "nagios"
|
||||||
|
```
|
||||||
|
|
||||||
|
# Collectd:
|
||||||
|
|
||||||
|
The collectd format parses the collectd binary network protocol. Tags are
|
||||||
|
created for host, instance, type, and type instance. All collectd values are
|
||||||
|
added as float64 fields.
|
||||||
|
|
||||||
|
For more information about the binary network protocol see
|
||||||
|
[here](https://collectd.org/wiki/index.php/Binary_protocol).
|
||||||
|
|
||||||
|
You can control the cryptographic settings with parser options. Create an
|
||||||
|
authentication file and set `collectd_auth_file` to the path of the file, then
|
||||||
|
set the desired security level in `collectd_security_level`.
|
||||||
|
|
||||||
|
Additional information including client setup can be found
|
||||||
|
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
|
||||||
|
|
||||||
|
You can also change the path to the typesdb or add additional typesdb using
|
||||||
|
`collectd_typesdb`.
|
||||||
|
|
||||||
|
#### Collectd Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.socket_listener]]
|
||||||
|
service_address = "udp://127.0.0.1:25826"
|
||||||
|
name_prefix = "collectd_"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "collectd"
|
||||||
|
|
||||||
|
## Authentication file for cryptographic security levels
|
||||||
|
collectd_auth_file = "/etc/collectd/auth_file"
|
||||||
|
## One of none (default), sign, or encrypt
|
||||||
|
collectd_security_level = "encrypt"
|
||||||
|
## Path to TypesDB specifications
|
||||||
|
collectd_typesdb = ["/usr/share/collectd/types.db"]
|
||||||
|
```
|
||||||
|
|
||||||
|
# Dropwizard:
|
||||||
|
|
||||||
|
The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported, `string`, `number` and `boolean`.
|
||||||
|
|
||||||
|
A typical JSON of a dropwizard metric registry:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": "3.0.0",
|
||||||
|
"counters" : {
|
||||||
|
"measurement,tag1=green" : {
|
||||||
|
"count" : 1
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"meters" : {
|
||||||
|
"measurement" : {
|
||||||
|
"count" : 1,
|
||||||
|
"m15_rate" : 1.0,
|
||||||
|
"m1_rate" : 1.0,
|
||||||
|
"m5_rate" : 1.0,
|
||||||
|
"mean_rate" : 1.0,
|
||||||
|
"units" : "events/second"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gauges" : {
|
||||||
|
"measurement" : {
|
||||||
|
"value" : 1
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"histograms" : {
|
||||||
|
"measurement" : {
|
||||||
|
"count" : 1,
|
||||||
|
"max" : 1.0,
|
||||||
|
"mean" : 1.0,
|
||||||
|
"min" : 1.0,
|
||||||
|
"p50" : 1.0,
|
||||||
|
"p75" : 1.0,
|
||||||
|
"p95" : 1.0,
|
||||||
|
"p98" : 1.0,
|
||||||
|
"p99" : 1.0,
|
||||||
|
"p999" : 1.0,
|
||||||
|
"stddev" : 1.0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"timers" : {
|
||||||
|
"measurement" : {
|
||||||
|
"count" : 1,
|
||||||
|
"max" : 1.0,
|
||||||
|
"mean" : 1.0,
|
||||||
|
"min" : 1.0,
|
||||||
|
"p50" : 1.0,
|
||||||
|
"p75" : 1.0,
|
||||||
|
"p95" : 1.0,
|
||||||
|
"p98" : 1.0,
|
||||||
|
"p99" : 1.0,
|
||||||
|
"p999" : 1.0,
|
||||||
|
"stddev" : 1.0,
|
||||||
|
"m15_rate" : 1.0,
|
||||||
|
"m1_rate" : 1.0,
|
||||||
|
"m5_rate" : 1.0,
|
||||||
|
"mean_rate" : 1.0,
|
||||||
|
"duration_units" : "seconds",
|
||||||
|
"rate_units" : "calls/second"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Would get translated into 4 different measurements:
|
||||||
|
|
||||||
|
```
|
||||||
|
measurement,metric_type=counter,tag1=green count=1
|
||||||
|
measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
|
||||||
|
measurement,metric_type=gauge value=1
|
||||||
|
measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
|
||||||
|
measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field.
|
||||||
|
Eg. to parse the following JSON document:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"time" : "2017-02-22T14:33:03.662+02:00",
|
||||||
|
"tags" : {
|
||||||
|
"tag1" : "green",
|
||||||
|
"tag2" : "yellow"
|
||||||
|
},
|
||||||
|
"metrics" : {
|
||||||
|
"counters" : {
|
||||||
|
"measurement" : {
|
||||||
|
"count" : 1
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"meters" : {},
|
||||||
|
"gauges" : {},
|
||||||
|
"histograms" : {},
|
||||||
|
"timers" : {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
and translate it into:
|
||||||
|
|
||||||
|
```
|
||||||
|
measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
|
||||||
|
```
|
||||||
|
|
||||||
|
you simply need to use the following additional configuration properties:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
dropwizard_metric_registry_path = "metrics"
|
||||||
|
dropwizard_time_path = "time"
|
||||||
|
dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
|
||||||
|
dropwizard_tags_path = "tags"
|
||||||
|
## tag paths per tag are supported too, eg.
|
||||||
|
#[inputs.yourinput.dropwizard_tag_paths]
|
||||||
|
# tag1 = "tags.tag1"
|
||||||
|
# tag2 = "tags.tag2"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
For more information about the dropwizard json format see
|
||||||
|
[here](http://metrics.dropwizard.io/3.1.0/manual/json/).
|
||||||
|
|
||||||
|
#### Dropwizard Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[inputs.exec]]
|
||||||
|
## Commands array
|
||||||
|
commands = ["curl http://localhost:8080/sys/metrics"]
|
||||||
|
timeout = "5s"
|
||||||
|
|
||||||
|
## Data format to consume.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||||
|
data_format = "dropwizard"
|
||||||
|
|
||||||
|
## Used by the templating engine to join matched values when cardinality is > 1
|
||||||
|
separator = "_"
|
||||||
|
|
||||||
|
## Each template line requires a template pattern. It can have an optional
|
||||||
|
## filter before the template and separated by spaces. It can also have optional extra
|
||||||
|
## tags following the template. Multiple tags should be separated by commas and no spaces
|
||||||
|
## similar to the line protocol format. There can be only one default template.
|
||||||
|
## Templates support below format:
|
||||||
|
## 1. filter + template
|
||||||
|
## 2. filter + template + extra tag(s)
|
||||||
|
## 3. filter + template with field key
|
||||||
|
## 4. default template
|
||||||
|
## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
|
||||||
|
templates = []
|
||||||
|
|
||||||
|
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
|
||||||
|
## to locate the metric registry within the JSON document
|
||||||
|
# dropwizard_metric_registry_path = "metrics"
|
||||||
|
|
||||||
|
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
|
||||||
|
## to locate the default time of the measurements within the JSON document
|
||||||
|
# dropwizard_time_path = "time"
|
||||||
|
# dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
|
||||||
|
|
||||||
|
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
|
||||||
|
## to locate the tags map within the JSON document
|
||||||
|
# dropwizard_tags_path = "tags"
|
||||||
|
|
||||||
|
## You may even use tag paths per tag
|
||||||
|
# [inputs.exec.dropwizard_tag_paths]
|
||||||
|
# tag1 = "tags.tag1"
|
||||||
|
# tag2 = "tags.tag2"
|
||||||
|
|
||||||
|
```
|
|
@ -1,19 +1,34 @@
|
||||||
# Output Data Formats
|
# Telegraf Output Data Formats
|
||||||
|
|
||||||
In addition to output specific data formats, Telegraf supports a set of
|
Telegraf is able to serialize metrics into the following output data formats:
|
||||||
standard data formats that may be selected from when configuring many output
|
|
||||||
plugins.
|
|
||||||
|
|
||||||
1. [InfluxDB Line Protocol](/plugins/serializers/influx)
|
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
|
||||||
1. [Carbon2](/plugins/serializers/carbon2)
|
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
|
||||||
1. [Graphite](/plugins/serializers/graphite)
|
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
|
||||||
1. [JSON](/plugins/serializers/json)
|
|
||||||
1. [Prometheus](/plugins/serializers/prometheus)
|
|
||||||
1. [SplunkMetric](/plugins/serializers/splunkmetric)
|
|
||||||
1. [Wavefront](/plugins/serializers/wavefront)
|
|
||||||
|
|
||||||
You will be able to identify the plugins with support by the presence of a
|
Telegraf metrics, like InfluxDB
|
||||||
`data_format` config option, for example, in the `file` output plugin:
|
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
|
||||||
|
are a combination of four basic parts:
|
||||||
|
|
||||||
|
1. Measurement Name
|
||||||
|
1. Tags
|
||||||
|
1. Fields
|
||||||
|
1. Timestamp
|
||||||
|
|
||||||
|
In InfluxDB line protocol, these 4 parts are easily defined in textual form:
|
||||||
|
|
||||||
|
```
|
||||||
|
measurement_name[,tag1=val1,...] field1=val1[,field2=val2,...] [timestamp]
|
||||||
|
```
|
||||||
|
|
||||||
|
For Telegraf outputs that write textual data (such as `kafka`, `mqtt`, and `file`),
|
||||||
|
InfluxDB line protocol was originally the only available output format. But now
|
||||||
|
we are normalizing telegraf metric "serializers" into a
|
||||||
|
[plugin-like interface](https://github.com/influxdata/telegraf/tree/master/plugins/serializers)
|
||||||
|
across all output plugins that can support it.
|
||||||
|
You will be able to identify a plugin that supports different data formats
|
||||||
|
by the presence of a `data_format`
|
||||||
|
config option, for example, in the `file` output plugin:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[[outputs.file]]
|
[[outputs.file]]
|
||||||
|
@ -25,4 +40,124 @@ You will be able to identify the plugins with support by the presence of a
|
||||||
## more about them here:
|
## more about them here:
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
|
|
||||||
|
## Additional configuration options go here
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Each data_format has an additional set of configuration options available, which
|
||||||
|
are described below.
|
||||||
|
|
||||||
|
# Influx:
|
||||||
|
|
||||||
|
There are no additional configuration options for InfluxDB line-protocol. The
|
||||||
|
metrics are serialized directly into InfluxDB line-protocol.
|
||||||
|
|
||||||
|
### Influx Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[outputs.file]]
|
||||||
|
## Files to write to, "stdout" is a specially handled file.
|
||||||
|
files = ["stdout", "/tmp/metrics.out"]
|
||||||
|
|
||||||
|
## Data format to output.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||||
|
data_format = "influx"
|
||||||
|
```
|
||||||
|
|
||||||
|
# Graphite:
|
||||||
|
|
||||||
|
The Graphite data format translates Telegraf metrics into _dot_ buckets. A
|
||||||
|
template can be specified for the output of Telegraf metrics into Graphite
|
||||||
|
buckets. The default template is:
|
||||||
|
|
||||||
|
```
|
||||||
|
template = "host.tags.measurement.field"
|
||||||
|
```
|
||||||
|
|
||||||
|
In the above template, we have four parts:
|
||||||
|
|
||||||
|
1. _host_ is a tag key. This can be any tag key that is in the Telegraf
|
||||||
|
metric(s). If the key doesn't exist, it will be ignored. If it does exist, the
|
||||||
|
tag value will be filled in.
|
||||||
|
1. _tags_ is a special keyword that outputs all remaining tag values, separated
|
||||||
|
by dots and in alphabetical order (by tag key). These will be filled after all
|
||||||
|
tag keys are filled.
|
||||||
|
1. _measurement_ is a special keyword that outputs the measurement name.
|
||||||
|
1. _field_ is a special keyword that outputs the field name.
|
||||||
|
|
||||||
|
Which means the following influx metric -> graphite conversion would happen:
|
||||||
|
|
||||||
|
```
|
||||||
|
cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
|
||||||
|
=>
|
||||||
|
tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
|
||||||
|
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
|
||||||
|
```
|
||||||
|
|
||||||
|
Fields with string values will be skipped. Boolean fields will be converted
|
||||||
|
to 1 (true) or 0 (false).
|
||||||
|
|
||||||
|
### Graphite Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[outputs.file]]
|
||||||
|
## Files to write to, "stdout" is a specially handled file.
|
||||||
|
files = ["stdout", "/tmp/metrics.out"]
|
||||||
|
|
||||||
|
## Data format to output.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||||
|
data_format = "graphite"
|
||||||
|
|
||||||
|
# prefix each graphite bucket
|
||||||
|
prefix = "telegraf"
|
||||||
|
# graphite template
|
||||||
|
template = "host.tags.measurement.field"
|
||||||
|
```
|
||||||
|
|
||||||
|
# JSON:
|
||||||
|
|
||||||
|
The JSON data format serializes Telegraf metrics in JSON format. The format is:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"fields":{
|
||||||
|
"field_1":30,
|
||||||
|
"field_2":4,
|
||||||
|
"field_N":59,
|
||||||
|
"n_images":660
|
||||||
|
},
|
||||||
|
"name":"docker",
|
||||||
|
"tags":{
|
||||||
|
"host":"raynor"
|
||||||
|
},
|
||||||
|
"timestamp":1458229140
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### JSON Configuration:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[[outputs.file]]
|
||||||
|
## Files to write to, "stdout" is a specially handled file.
|
||||||
|
files = ["stdout", "/tmp/metrics.out"]
|
||||||
|
|
||||||
|
## Data format to output.
|
||||||
|
## Each data format has its own unique set of configuration options, read
|
||||||
|
## more about them here:
|
||||||
|
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||||
|
data_format = "json"
|
||||||
|
json_timestamp_units = "1ns"
|
||||||
|
```
|
||||||
|
|
||||||
|
By default, the timestamp that is output in JSON data format serialized Telegraf
|
||||||
|
metrics is in seconds. The precision of this timestamp can be adjusted for any output
|
||||||
|
by adding the optional `json_timestamp_units` parameter to the configuration for
|
||||||
|
that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`),
|
||||||
|
microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this
|
||||||
|
parameter will be truncated to the nearest power of 10 less than the specified value, so if the `json_timestamp_units`
|
||||||
|
are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be
|
||||||
|
output in hundredths of a second (`10ms`).
|
||||||
|
|
32
docs/FAQ.md
32
docs/FAQ.md
|
@ -10,13 +10,10 @@ docker run --name telegraf
|
||||||
-v /etc:/hostfs/etc:ro
|
-v /etc:/hostfs/etc:ro
|
||||||
-v /proc:/hostfs/proc:ro
|
-v /proc:/hostfs/proc:ro
|
||||||
-v /sys:/hostfs/sys:ro
|
-v /sys:/hostfs/sys:ro
|
||||||
-v /var:/hostfs/var:ro
|
-v /var/run/utmp:/var/run/utmp:ro
|
||||||
-v /run:/hostfs/run:ro
|
|
||||||
-e HOST_ETC=/hostfs/etc
|
-e HOST_ETC=/hostfs/etc
|
||||||
-e HOST_PROC=/hostfs/proc
|
-e HOST_PROC=/hostfs/proc
|
||||||
-e HOST_SYS=/hostfs/sys
|
-e HOST_SYS=/hostfs/sys
|
||||||
-e HOST_VAR=/hostfs/var
|
|
||||||
-e HOST_RUN=/hostfs/run
|
|
||||||
-e HOST_MOUNT_PREFIX=/hostfs
|
-e HOST_MOUNT_PREFIX=/hostfs
|
||||||
telegraf
|
telegraf
|
||||||
```
|
```
|
||||||
|
@ -43,33 +40,6 @@ If running as a service add the environment variable to `/etc/default/telegraf`:
|
||||||
GODEBUG=netdns=cgo
|
GODEBUG=netdns=cgo
|
||||||
```
|
```
|
||||||
|
|
||||||
### Q: How can I manage series cardinality?
|
|
||||||
|
|
||||||
High [series cardinality][], when not properly managed, can cause high load on
|
|
||||||
your database. Telegraf attempts to avoid creating series with high
|
|
||||||
cardinality, but some monitoring workloads such as tracking containers are
|
|
||||||
inherently high cardinality. These workloads can still be monitored, but care
|
|
||||||
must be taken to manage cardinality growth.
|
|
||||||
|
|
||||||
You can use the following techniques to avoid cardinality issues:
|
|
||||||
|
|
||||||
- Use [metric filtering][] options to exclude unneeded measurements and tags.
|
|
||||||
- Write to a database with an appropriate [retention policy][].
|
|
||||||
- Limit series cardinality in your database using the
|
|
||||||
[max-series-per-database][] and [max-values-per-tag][] settings.
|
|
||||||
- Consider using the [Time Series Index][tsi].
|
|
||||||
- Monitor your databases using the [show cardinality][] commands.
|
|
||||||
- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques.
|
|
||||||
|
|
||||||
[series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality
|
|
||||||
[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
|
|
||||||
[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
|
|
||||||
[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
|
|
||||||
[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
|
|
||||||
[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
|
|
||||||
[show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
|
|
||||||
[influx docs]: https://docs.influxdata.com/influxdb/latest/
|
|
||||||
|
|
||||||
### Q: When will the next version be released?
|
### Q: When will the next version be released?
|
||||||
|
|
||||||
The latest release date estimate can be viewed on the
|
The latest release date estimate can be viewed on the
|
||||||
|
|
149
docs/INPUTS.md
149
docs/INPUTS.md
|
@ -1,149 +0,0 @@
|
||||||
### Input Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create new collection inputs.
|
|
||||||
Telegraf is entirely plugin driven. This interface allows for operators to
|
|
||||||
pick and choose what is gathered and makes it easy for developers
|
|
||||||
to create new ways of generating metrics.
|
|
||||||
|
|
||||||
Plugin authorship is kept as simple as possible to promote people to develop
|
|
||||||
and submit new inputs.
|
|
||||||
|
|
||||||
### Input Plugin Guidelines
|
|
||||||
|
|
||||||
- A plugin must conform to the [telegraf.Input][] interface.
|
|
||||||
- Input Plugins should call `inputs.Add` in their `init` function to register
|
|
||||||
themselves. See below for a quick example.
|
|
||||||
- Input Plugins must be added to the
|
|
||||||
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
|
|
||||||
- The `SampleConfig` function should return valid toml that describes how the
|
|
||||||
plugin can be configured. This is included in `telegraf config`. Please
|
|
||||||
consult the [SampleConfig][] page for the latest style
|
|
||||||
guidelines.
|
|
||||||
- The `Description` function should say in one line what this plugin does.
|
|
||||||
- Follow the recommended [CodeStyle][].
|
|
||||||
|
|
||||||
Let's say you've written a plugin that emits metrics about processes on the
|
|
||||||
current host.
|
|
||||||
|
|
||||||
### Input Plugin Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package simple
|
|
||||||
|
|
||||||
// simple.go
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Simple struct {
|
|
||||||
Ok bool `toml:"ok"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Description() string {
|
|
||||||
return "a demo plugin"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) SampleConfig() string {
|
|
||||||
return `
|
|
||||||
## Indicate if everything is fine
|
|
||||||
ok = true
|
|
||||||
`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Init() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Gather(acc telegraf.Accumulator) error {
|
|
||||||
if s.Ok {
|
|
||||||
acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil)
|
|
||||||
} else {
|
|
||||||
acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Development
|
|
||||||
|
|
||||||
* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker
|
|
||||||
dev environment using docker-compose.
|
|
||||||
* ***[Optional]*** When developing a plugin, add a `dev` directory with a
|
|
||||||
`docker-compose.yml` and `telegraf.conf` as well as any other supporting
|
|
||||||
files, where sensible.
|
|
||||||
|
|
||||||
### Typed Metrics
|
|
||||||
|
|
||||||
In addition to the `AddFields` function, the accumulator also supports
|
|
||||||
functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types
|
|
||||||
are ignored by the InfluxDB output, but can be used for other outputs, such as
|
|
||||||
[prometheus][prom metric types].
|
|
||||||
|
|
||||||
### Data Formats
|
|
||||||
|
|
||||||
Some input plugins, such as the [exec][] plugin, can accept any supported
|
|
||||||
[input data formats][].
|
|
||||||
|
|
||||||
In order to enable this, you must specify a `SetParser(parser parsers.Parser)`
|
|
||||||
function on the plugin object (see the exec plugin for an example), as well as
|
|
||||||
defining `parser` as a field of the object.
|
|
||||||
|
|
||||||
You can then utilize the parser internally in your plugin, parsing data as you
|
|
||||||
see fit. Telegraf's configuration layer will take care of instantiating and
|
|
||||||
creating the `Parser` object.
|
|
||||||
|
|
||||||
Add the following to the `SampleConfig()`:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Data format to consume.
|
|
||||||
## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Service Input Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create new "service" collection
|
|
||||||
inputs. A service plugin differs from a regular plugin in that it operates a
|
|
||||||
background service while Telegraf is running. One example would be the
|
|
||||||
`statsd` plugin, which operates a statsd server.
|
|
||||||
|
|
||||||
Service Input Plugins are substantially more complicated than a regular
|
|
||||||
plugin, as they will require threads and locks to verify data integrity.
|
|
||||||
Service Input Plugins should be avoided unless there is no way to create their
|
|
||||||
behavior with a regular plugin.
|
|
||||||
|
|
||||||
To create a Service Input implement the [telegraf.ServiceInput][] interface.
|
|
||||||
|
|
||||||
### Metric Tracking
|
|
||||||
|
|
||||||
Metric Tracking provides a system to be notified when metrics have been
|
|
||||||
successfully written to their outputs or otherwise discarded. This allows
|
|
||||||
inputs to be created that function as reliable queue consumers.
|
|
||||||
|
|
||||||
To get started with metric tracking begin by calling `WithTracking` on the
|
|
||||||
[telegraf.Accumulator][]. Add metrics using the `AddTrackingMetricGroup`
|
|
||||||
function on the returned [telegraf.TrackingAccumulator][] and store the
|
|
||||||
`TrackingID`. The `Delivered()` channel will return a type with information
|
|
||||||
about the final delivery status of the metric group.
|
|
||||||
|
|
||||||
Check the [amqp_consumer][] for an example implementation.
|
|
||||||
|
|
||||||
[exec]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec
|
|
||||||
[amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
|
|
||||||
[prom metric types]: https://prometheus.io/docs/concepts/metric_types/
|
|
||||||
[input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
|
||||||
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
|
|
||||||
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
|
|
||||||
[telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input
|
|
||||||
[telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput
|
|
||||||
[telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
|
|
||||||
[telegraf.TrackingAccumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
|
|
|
@ -3,170 +3,105 @@
|
||||||
When distributed in a binary form, Telegraf may contain portions of the
|
When distributed in a binary form, Telegraf may contain portions of the
|
||||||
following works:
|
following works:
|
||||||
|
|
||||||
- cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE)
|
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
|
||||||
- code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE)
|
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
|
||||||
- collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD)
|
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
|
||||||
- github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE)
|
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
|
||||||
- github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE)
|
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
|
||||||
- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE)
|
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
|
||||||
- github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE)
|
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
|
||||||
- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE)
|
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
|
||||||
- github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE)
|
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
|
||||||
- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
|
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
|
||||||
- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE)
|
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
|
||||||
- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE)
|
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
|
||||||
- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
|
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
|
||||||
- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE)
|
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
|
||||||
- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE)
|
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
|
||||||
- github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
|
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
|
||||||
- github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING)
|
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
|
||||||
- github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE)
|
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
|
||||||
- github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE)
|
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
|
||||||
- github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE)
|
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
|
||||||
- github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING)
|
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
|
||||||
- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
|
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
|
||||||
- github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE)
|
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
|
||||||
- github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE)
|
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
|
||||||
- github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE)
|
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
|
||||||
- github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE)
|
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
|
||||||
- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
|
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
|
||||||
- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE)
|
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
|
||||||
- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
|
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
|
||||||
- github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
|
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
|
||||||
- github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md)
|
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
|
||||||
- github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE)
|
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
|
||||||
- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt)
|
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
|
||||||
- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE)
|
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
|
||||||
- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
|
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
|
||||||
- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE)
|
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
|
||||||
- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE)
|
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
|
||||||
- github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE)
|
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
|
||||||
- github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE)
|
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
|
||||||
- github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE)
|
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
|
||||||
- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE)
|
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
|
||||||
- github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
|
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
|
||||||
- github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
|
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
|
||||||
- github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE)
|
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
|
||||||
- github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
|
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
|
||||||
- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE)
|
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
|
||||||
- github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE)
|
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
||||||
- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE)
|
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
|
||||||
- github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
|
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
|
||||||
- github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE)
|
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
||||||
- github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE)
|
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
|
||||||
- github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
|
- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
|
||||||
- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE)
|
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
|
||||||
- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/blob/master/LICENSE)
|
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
|
||||||
- github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE)
|
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
|
||||||
- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE)
|
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
|
||||||
- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE)
|
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
|
||||||
- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE)
|
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
|
||||||
- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE)
|
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
|
||||||
- github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE)
|
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
|
||||||
- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE)
|
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
|
||||||
- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE)
|
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
|
||||||
- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE)
|
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
|
||||||
- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE)
|
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
|
||||||
- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE)
|
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
|
||||||
- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE)
|
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
|
||||||
- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE)
|
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
|
||||||
- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
|
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
|
||||||
- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE)
|
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE)
|
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE)
|
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE)
|
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
|
||||||
- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE)
|
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
|
||||||
- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE)
|
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
|
||||||
- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE)
|
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
|
||||||
- github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
|
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
|
||||||
- github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE)
|
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
|
||||||
- github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE)
|
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
|
||||||
- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE)
|
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
|
||||||
- github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE)
|
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
|
||||||
- github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
|
- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE)
|
||||||
- github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE)
|
- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE)
|
||||||
- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE)
|
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
|
||||||
- github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE)
|
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
|
||||||
- github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
|
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
|
||||||
- github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE)
|
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
|
||||||
- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE)
|
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
||||||
- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE)
|
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
|
||||||
- github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE)
|
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
|
||||||
- github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE)
|
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
|
||||||
- github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
|
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
|
||||||
- github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md)
|
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
|
||||||
- github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md)
|
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
|
||||||
- github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md)
|
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
|
||||||
- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE)
|
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
|
||||||
- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE)
|
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
|
||||||
- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
|
- gopkg.in/fsnotify.v1 [BSD](https://github.com/fsnotify/fsnotify/blob/v1.4.2/LICENSE)
|
||||||
- github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
|
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
|
||||||
- github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
|
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
||||||
- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE)
|
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
|
||||||
- github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE)
|
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
|
||||||
- github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE)
|
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)
|
||||||
- github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE)
|
|
||||||
- github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE)
|
|
||||||
- github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md)
|
|
||||||
- github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
|
|
||||||
- github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE)
|
|
||||||
- github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE)
|
|
||||||
- github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE)
|
|
||||||
- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
|
|
||||||
- github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE)
|
|
||||||
- github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE)
|
|
||||||
- github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
|
|
||||||
- github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE)
|
|
||||||
- github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE)
|
|
||||||
- github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE)
|
|
||||||
- github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE)
|
|
||||||
- github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
|
|
||||||
- github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE)
|
|
||||||
- github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
|
|
||||||
- github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE)
|
|
||||||
- github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE)
|
|
||||||
- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE)
|
|
||||||
- github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE)
|
|
||||||
- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE)
|
|
||||||
- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE)
|
|
||||||
- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE)
|
|
||||||
- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE)
|
|
||||||
- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE)
|
|
||||||
- github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE)
|
|
||||||
- github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE)
|
|
||||||
- github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE)
|
|
||||||
- github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt)
|
|
||||||
- github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE)
|
|
||||||
- github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
|
|
||||||
- github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
|
|
||||||
- github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
|
|
||||||
- go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE)
|
|
||||||
- golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE)
|
|
||||||
- golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE)
|
|
||||||
- golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE)
|
|
||||||
- golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE)
|
|
||||||
- golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE)
|
|
||||||
- golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE)
|
|
||||||
- golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE)
|
|
||||||
- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
|
|
||||||
- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md)
|
|
||||||
- google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE)
|
|
||||||
- google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE)
|
|
||||||
- google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE)
|
|
||||||
- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE)
|
|
||||||
- gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
|
|
||||||
- gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE)
|
|
||||||
- gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE)
|
|
||||||
- gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE)
|
|
||||||
- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE)
|
|
||||||
- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE)
|
|
||||||
- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE)
|
|
||||||
- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE)
|
|
||||||
- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE)
|
|
||||||
- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
|
|
||||||
- gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE)
|
|
||||||
- gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
|
|
||||||
- gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE)
|
|
||||||
|
|
||||||
## telegraf used and modified code from these projects
|
|
||||||
- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/blob/master/LICENSE)
|
|
||||||
|
|
|
@ -1,22 +0,0 @@
|
||||||
# Metrics
|
|
||||||
|
|
||||||
Telegraf metrics are the internal representation used to model data during
|
|
||||||
processing. Metrics are closely based on InfluxDB's data model and contain
|
|
||||||
four main components:
|
|
||||||
|
|
||||||
- **Measurement Name**: Description and namespace for the metric.
|
|
||||||
- **Tags**: Key/Value string pairs and usually used to identify the
|
|
||||||
metric.
|
|
||||||
- **Fields**: Key/Value pairs that are typed and usually contain the
|
|
||||||
metric data.
|
|
||||||
- **Timestamp**: Date and time associated with the fields.
|
|
||||||
|
|
||||||
This metric type exists only in memory and must be converted to a concrete
|
|
||||||
representation in order to be transmitted or viewed. To achieve this we
|
|
||||||
provide several [output data formats][] sometimes referred to as
|
|
||||||
*serializers*. Our default serializer converts to [InfluxDB Line
|
|
||||||
Protocol][line protocol] which provides a high performance and one-to-one
|
|
||||||
direct mapping from Telegraf metrics.
|
|
||||||
|
|
||||||
[output data formats]: /docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
[line protocol]: /plugins/serializers/influx
|
|
114
docs/OUTPUTS.md
114
docs/OUTPUTS.md
|
@ -1,114 +0,0 @@
|
||||||
### Output Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create a new output sink. Outputs
|
|
||||||
are created in a similar manner as collection plugins, and their interface has
|
|
||||||
similar constructs.
|
|
||||||
|
|
||||||
### Output Plugin Guidelines
|
|
||||||
|
|
||||||
- An output must conform to the [telegraf.Output][] interface.
|
|
||||||
- Outputs should call `outputs.Add` in their `init` function to register
|
|
||||||
themselves. See below for a quick example.
|
|
||||||
- To be available within Telegraf itself, plugins must add themselves to the
|
|
||||||
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
|
|
||||||
- The `SampleConfig` function should return valid toml that describes how the
|
|
||||||
plugin can be configured. This is included in `telegraf config`. Please
|
|
||||||
consult the [SampleConfig][] page for the latest style guidelines.
|
|
||||||
- The `Description` function should say in one line what this output does.
|
|
||||||
- Follow the recommended [CodeStyle][].
|
|
||||||
|
|
||||||
### Output Plugin Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package simpleoutput
|
|
||||||
|
|
||||||
// simpleoutput.go
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/outputs"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Simple struct {
|
|
||||||
Ok bool `toml:"ok"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Description() string {
|
|
||||||
return "a demo output"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) SampleConfig() string {
|
|
||||||
return `
|
|
||||||
ok = true
|
|
||||||
`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Init() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Connect() error {
|
|
||||||
// Make a connection to the URL here
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Close() error {
|
|
||||||
// Close connection to the URL here
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Simple) Write(metrics []telegraf.Metric) error {
|
|
||||||
for _, metric := range metrics {
|
|
||||||
// write `metric` to the output sink here
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
## Data Formats
|
|
||||||
|
|
||||||
Some output plugins, such as the [file][] plugin, can write in any supported
|
|
||||||
[output data formats][].
|
|
||||||
|
|
||||||
In order to enable this, you must specify a
|
|
||||||
`SetSerializer(serializer serializers.Serializer)`
|
|
||||||
function on the plugin object (see the file plugin for an example), as well as
|
|
||||||
defining `serializer` as a field of the object.
|
|
||||||
|
|
||||||
You can then utilize the serializer internally in your plugin, serializing data
|
|
||||||
before it's written. Telegraf's configuration layer will take care of
|
|
||||||
instantiating and creating the `Serializer` object.
|
|
||||||
|
|
||||||
You should also add the following to your `SampleConfig()`:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Data format to output.
|
|
||||||
## Each data format has its own unique set of configuration options, read
|
|
||||||
## more about them here:
|
|
||||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
data_format = "influx"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Flushing Metrics to Outputs
|
|
||||||
|
|
||||||
Metrics are flushed to outputs when any of the following events happen:
|
|
||||||
- `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval
|
|
||||||
- At least `metric_batch_size` count of metrics are waiting in the buffer
|
|
||||||
- The telegraf process has received a SIGUSR1 signal
|
|
||||||
|
|
||||||
Note that if the flush takes longer than the `agent.interval` to write the metrics
|
|
||||||
to the output, you'll see a message saying the output `did not complete within its
|
|
||||||
flush interval`. This may mean your output is not keeping up with the flow of metrics,
|
|
||||||
and you may want to look into enabling compression, reducing the size of your metrics,
|
|
||||||
or investigate other reasons why the writes might be taking longer than expected.
|
|
||||||
|
|
||||||
[file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file
|
|
||||||
[output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
|
|
||||||
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
|
|
||||||
[telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output
|
|
|
@ -1,69 +0,0 @@
|
||||||
### Processor Plugins
|
|
||||||
|
|
||||||
This section is for developers who want to create a new processor plugin.
|
|
||||||
|
|
||||||
### Processor Plugin Guidelines
|
|
||||||
|
|
||||||
* A processor must conform to the [telegraf.Processor][] interface.
|
|
||||||
* Processors should call `processors.Add` in their `init` function to register
|
|
||||||
themselves. See below for a quick example.
|
|
||||||
* To be available within Telegraf itself, plugins must add themselves to the
|
|
||||||
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
|
|
||||||
* The `SampleConfig` function should return valid toml that describes how the
|
|
||||||
processor can be configured. This is include in the output of `telegraf
|
|
||||||
config`.
|
|
||||||
- The `SampleConfig` function should return valid toml that describes how the
|
|
||||||
plugin can be configured. This is included in `telegraf config`. Please
|
|
||||||
consult the [SampleConfig][] page for the latest style guidelines.
|
|
||||||
* The `Description` function should say in one line what this processor does.
|
|
||||||
- Follow the recommended [CodeStyle][].
|
|
||||||
|
|
||||||
### Processor Plugin Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package printer
|
|
||||||
|
|
||||||
// printer.go
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/plugins/processors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Printer struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
var sampleConfig = `
|
|
||||||
`
|
|
||||||
|
|
||||||
func (p *Printer) SampleConfig() string {
|
|
||||||
return sampleConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Printer) Description() string {
|
|
||||||
return "Print all metrics that pass through this filter."
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Printer) Init() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
|
||||||
for _, metric := range in {
|
|
||||||
fmt.Println(metric.String())
|
|
||||||
}
|
|
||||||
return in
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
processors.Add("printer", func() telegraf.Processor {
|
|
||||||
return &Printer{}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
|
|
||||||
[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
|
|
||||||
[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor
|
|
|
@ -1,21 +0,0 @@
|
||||||
# Telegraf
|
|
||||||
|
|
||||||
- Concepts
|
|
||||||
- [Metrics][metrics]
|
|
||||||
- [Input Data Formats][parsers]
|
|
||||||
- [Output Data Formats][serializers]
|
|
||||||
- [Aggregators & Processors][aggproc]
|
|
||||||
- Administration
|
|
||||||
- [Configuration][conf]
|
|
||||||
- [Profiling][profiling]
|
|
||||||
- [Windows Service][winsvc]
|
|
||||||
- [FAQ][faq]
|
|
||||||
|
|
||||||
[conf]: /docs/CONFIGURATION.md
|
|
||||||
[metrics]: /docs/METRICS.md
|
|
||||||
[parsers]: /docs/DATA_FORMATS_INPUT.md
|
|
||||||
[serializers]: /docs/DATA_FORMATS_OUTPUT.md
|
|
||||||
[aggproc]: /docs/AGGREGATORS_AND_PROCESSORS.md
|
|
||||||
[profiling]: /docs/PROFILING.md
|
|
||||||
[winsvc]: /docs/WINDOWS_SERVICE.md
|
|
||||||
[faq]: /docs/FAQ.md
|
|
|
@ -1,135 +0,0 @@
|
||||||
# Template Patterns
|
|
||||||
|
|
||||||
Template patterns are a mini language that describes how a dot delimited
|
|
||||||
string should be mapped to and from [metrics][].
|
|
||||||
|
|
||||||
A template has the form:
|
|
||||||
```
|
|
||||||
"host.mytag.mytag.measurement.measurement.field*"
|
|
||||||
```
|
|
||||||
|
|
||||||
Where the following keywords can be set:
|
|
||||||
|
|
||||||
1. `measurement`: specifies that this section of the graphite bucket corresponds
|
|
||||||
to the measurement name. This can be specified multiple times.
|
|
||||||
2. `field`: specifies that this section of the graphite bucket corresponds
|
|
||||||
to the field name. This can be specified multiple times.
|
|
||||||
3. `measurement*`: specifies that all remaining elements of the graphite bucket
|
|
||||||
correspond to the measurement name.
|
|
||||||
4. `field*`: specifies that all remaining elements of the graphite bucket
|
|
||||||
correspond to the field name.
|
|
||||||
|
|
||||||
Any part of the template that is not a keyword is treated as a tag key. This
|
|
||||||
can also be specified multiple times.
|
|
||||||
|
|
||||||
**NOTE:** `field*` cannot be used in conjunction with `measurement*`.
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
#### Measurement & Tag Templates
|
|
||||||
|
|
||||||
The most basic template is to specify a single transformation to apply to all
|
|
||||||
incoming metrics. So the following template:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"region.region.measurement*"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
us.west.cpu.load 100
|
|
||||||
=> cpu.load,region=us.west value=100
|
|
||||||
```
|
|
||||||
|
|
||||||
Multiple templates can also be specified, but these should be differentiated
|
|
||||||
using _filters_ (see below for more details)
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"*.*.* region.region.measurement", # <- all 3-part measurements will match this one.
|
|
||||||
"*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one.
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Field Templates
|
|
||||||
|
|
||||||
The field keyword tells Telegraf to give the metric that field name.
|
|
||||||
So the following template:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
separator = "_"
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.field.field.region"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.idle.percent.eu-east 100
|
|
||||||
=> cpu_usage,region=eu-east idle_percent=100
|
|
||||||
```
|
|
||||||
|
|
||||||
The field key can also be derived from all remaining elements of the graphite
|
|
||||||
bucket by specifying `field*`:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
separator = "_"
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.region.field*"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
which would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.eu-east.idle.percentage 100
|
|
||||||
=> cpu_usage,region=eu-east idle_percentage=100
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Filter Templates
|
|
||||||
|
|
||||||
Users can also filter the template(s) to use based on the name of the bucket,
|
|
||||||
using glob matching, like so:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"cpu.* measurement.measurement.region",
|
|
||||||
"mem.* measurement.measurement.host"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
which would result in the following transformation:
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.load.eu-east 100
|
|
||||||
=> cpu_load,region=eu-east value=100
|
|
||||||
|
|
||||||
mem.cached.localhost 256
|
|
||||||
=> mem_cached,host=localhost value=256
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Adding Tags
|
|
||||||
|
|
||||||
Additional tags can be added to a metric that don't exist on the received metric.
|
|
||||||
You can add additional tags by specifying them after the pattern.
|
|
||||||
Tags have the same format as the line protocol.
|
|
||||||
Multiple tags are separated by commas.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
templates = [
|
|
||||||
"measurement.measurement.field.region datacenter=1a"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
would result in the following Graphite -> Telegraf transformation.
|
|
||||||
|
|
||||||
```
|
|
||||||
cpu.usage.idle.eu-east 100
|
|
||||||
=> cpu_usage,region=eu-east,datacenter=1a idle=100
|
|
||||||
```
|
|
||||||
|
|
||||||
[metrics]: /docs/METRICS.md
|
|
105
docs/TLS.md
105
docs/TLS.md
|
@ -1,105 +0,0 @@
|
||||||
# Transport Layer Security
|
|
||||||
|
|
||||||
There is an ongoing effort to standardize TLS options across plugins. When
|
|
||||||
possible, plugins will provide the standard settings described below. With the
|
|
||||||
exception of the advanced configuration available TLS settings will be
|
|
||||||
documented in the sample configuration.
|
|
||||||
|
|
||||||
### Client Configuration
|
|
||||||
|
|
||||||
For client TLS support we have the following options:
|
|
||||||
```toml
|
|
||||||
## Root certificates for verifying server certificates encoded in PEM format.
|
|
||||||
# tls_ca = "/etc/telegraf/ca.pem"
|
|
||||||
|
|
||||||
## The public and private keypairs for the client encoded in PEM format. May
|
|
||||||
## contain intermediate certificates.
|
|
||||||
# tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# tls_key = "/etc/telegraf/key.pem"
|
|
||||||
## Skip TLS verification.
|
|
||||||
# insecure_skip_verify = false
|
|
||||||
```
|
|
||||||
|
|
||||||
### Server Configuration
|
|
||||||
|
|
||||||
The server TLS configuration provides support for TLS mutual authentication:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Set one or more allowed client CA certificate file names to
|
|
||||||
## enable mutually authenticated TLS connections.
|
|
||||||
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
|
|
||||||
|
|
||||||
## Add service certificate and key.
|
|
||||||
# tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# tls_key = "/etc/telegraf/key.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Advanced Configuration
|
|
||||||
|
|
||||||
For plugins using the standard server configuration you can also set several
|
|
||||||
advanced settings. These options are not included in the sample configuration
|
|
||||||
for the interest of brevity.
|
|
||||||
|
|
||||||
```toml
|
|
||||||
## Define list of allowed ciphers suites. If not defined the default ciphers
|
|
||||||
## supported by Go will be used.
|
|
||||||
## ex: tls_cipher_suites = [
|
|
||||||
## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
|
||||||
## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
|
||||||
## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
|
||||||
## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
|
||||||
## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
|
||||||
## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
|
||||||
## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
|
||||||
## "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
|
||||||
## "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
|
||||||
## "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
|
||||||
## "TLS_RSA_WITH_AES_128_CBC_SHA",
|
|
||||||
## "TLS_RSA_WITH_AES_256_CBC_SHA"
|
|
||||||
## ]
|
|
||||||
# tls_cipher_suites = []
|
|
||||||
|
|
||||||
## Minimum TLS version that is acceptable.
|
|
||||||
# tls_min_version = "TLS10"
|
|
||||||
|
|
||||||
## Maximum SSL/TLS version that is acceptable.
|
|
||||||
# tls_max_version = "TLS13"
|
|
||||||
```
|
|
||||||
|
|
||||||
Cipher suites for use with `tls_cipher_suites`:
|
|
||||||
- `TLS_RSA_WITH_RC4_128_SHA`
|
|
||||||
- `TLS_RSA_WITH_3DES_EDE_CBC_SHA`
|
|
||||||
- `TLS_RSA_WITH_AES_128_CBC_SHA`
|
|
||||||
- `TLS_RSA_WITH_AES_256_CBC_SHA`
|
|
||||||
- `TLS_RSA_WITH_AES_128_CBC_SHA256`
|
|
||||||
- `TLS_RSA_WITH_AES_128_GCM_SHA256`
|
|
||||||
- `TLS_RSA_WITH_AES_256_GCM_SHA384`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_RC4_128_SHA`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`
|
|
||||||
- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`
|
|
||||||
- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`
|
|
||||||
- `TLS_AES_128_GCM_SHA256`
|
|
||||||
- `TLS_AES_256_GCM_SHA384`
|
|
||||||
- `TLS_CHACHA20_POLY1305_SHA256`
|
|
||||||
|
|
||||||
TLS versions for use with `tls_min_version` or `tls_max_version`:
|
|
||||||
- `TLS10`
|
|
||||||
- `TLS11`
|
|
||||||
- `TLS12`
|
|
||||||
- `TLS13`
|
|
|
@ -5,7 +5,7 @@ the general steps to set it up.
|
||||||
|
|
||||||
1. Obtain the telegraf windows distribution
|
1. Obtain the telegraf windows distribution
|
||||||
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
|
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
|
||||||
location simply specify the `--config` parameter with the desired location)
|
location simply specify the `-config` parameter with the desired location)
|
||||||
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
|
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
|
||||||
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
|
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
|
||||||
|
|
||||||
|
@ -26,15 +26,6 @@ the general steps to set it up.
|
||||||
> net start telegraf
|
> net start telegraf
|
||||||
```
|
```
|
||||||
|
|
||||||
## Config Directory
|
|
||||||
|
|
||||||
You can also specify a `--config-directory` for the service to use:
|
|
||||||
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
|
|
||||||
2. Include the `--config-directory` option when registering the service:
|
|
||||||
```
|
|
||||||
> C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
|
|
||||||
```
|
|
||||||
|
|
||||||
## Other supported operations
|
## Other supported operations
|
||||||
|
|
||||||
Telegraf can manage its own service through the --service flag:
|
Telegraf can manage its own service through the --service flag:
|
||||||
|
@ -46,26 +37,9 @@ Telegraf can manage its own service through the --service flag:
|
||||||
| `telegraf.exe --service start` | Start the telegraf service |
|
| `telegraf.exe --service start` | Start the telegraf service |
|
||||||
| `telegraf.exe --service stop` | Stop the telegraf service |
|
| `telegraf.exe --service stop` | Stop the telegraf service |
|
||||||
|
|
||||||
## Install multiple services
|
|
||||||
|
|
||||||
Running multiple instances of Telegraf is seldom needed, as you can run
|
Troubleshooting common error #1067
|
||||||
multiple instances of each plugin and route metric flow using the metric
|
|
||||||
filtering options. However, if you do need to run multiple telegraf instances
|
|
||||||
on a single system, you can install the service with the `--service-name` and
|
|
||||||
`--service-display-name` flags to give the services unique names:
|
|
||||||
|
|
||||||
```
|
|
||||||
> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1"
|
|
||||||
> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded.
|
|
||||||
Check event log for an error reported by `telegraf` service in case of Telegraf service reports failure on its start: Event Viewer->Windows Logs->Application
|
|
||||||
|
|
||||||
**Troubleshooting common error #1067**
|
|
||||||
|
|
||||||
When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start
|
When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start
|
||||||
|
|
||||||
--config "C:\Program Files\Telegraf\telegraf.conf"
|
--config C:\"Program Files"\Telegraf\telegraf.conf
|
||||||
|
|
4630
etc/telegraf.conf
4630
etc/telegraf.conf
File diff suppressed because it is too large
Load Diff
|
@ -1,26 +1,18 @@
|
||||||
# Telegraf Configuration
|
# Telegraf configuration
|
||||||
#
|
|
||||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||||
# declared inputs, and sent to the declared outputs.
|
# declared inputs, and sent to the declared outputs.
|
||||||
#
|
|
||||||
# Plugins must be declared in here to be active.
|
# Plugins must be declared in here to be active.
|
||||||
# To deactivate a plugin, comment out the name and any variables.
|
# To deactivate a plugin, comment out the name and any variables.
|
||||||
#
|
|
||||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
||||||
# file would generate.
|
# file would generate.
|
||||||
#
|
|
||||||
# Environment variables can be used anywhere in this config file, simply surround
|
|
||||||
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
|
|
||||||
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
|
|
||||||
|
|
||||||
|
|
||||||
# Global tags can be specified here in key="value" format.
|
# Global tags can be specified here in key="value" format.
|
||||||
[global_tags]
|
[global_tags]
|
||||||
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
||||||
# rack = "1a"
|
# rack = "1a"
|
||||||
## Environment variables can be used as tags, and throughout the config file
|
|
||||||
# user = "$USER"
|
|
||||||
|
|
||||||
|
|
||||||
# Configuration for telegraf agent
|
# Configuration for telegraf agent
|
||||||
[agent]
|
[agent]
|
||||||
|
@ -30,15 +22,11 @@
|
||||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||||
round_interval = true
|
round_interval = true
|
||||||
|
|
||||||
## Telegraf will send metrics to outputs in batches of at most
|
## Telegraf will cache metric_buffer_limit metrics for each output, and will
|
||||||
## metric_batch_size metrics.
|
## flush this buffer on a successful write.
|
||||||
## This controls the size of writes that Telegraf sends to output plugins.
|
metric_buffer_limit = 1000
|
||||||
metric_batch_size = 1000
|
## Flush the buffer whenever full, regardless of flush_interval.
|
||||||
|
flush_buffer_when_full = true
|
||||||
## Maximum number of unwritten metrics per output. Increasing this value
|
|
||||||
## allows for longer periods of output downtime without dropping metrics at the
|
|
||||||
## cost of higher maximum memory usage.
|
|
||||||
metric_buffer_limit = 10000
|
|
||||||
|
|
||||||
## Collection jitter is used to jitter the collection by a random amount.
|
## Collection jitter is used to jitter the collection by a random amount.
|
||||||
## Each plugin will sleep for a random time within jitter before collecting.
|
## Each plugin will sleep for a random time within jitter before collecting.
|
||||||
|
@ -46,197 +34,58 @@
|
||||||
## same time, which can have a measurable effect on the system.
|
## same time, which can have a measurable effect on the system.
|
||||||
collection_jitter = "0s"
|
collection_jitter = "0s"
|
||||||
|
|
||||||
## Default flushing interval for all outputs. Maximum flush_interval will be
|
## Default flushing interval for all outputs. You shouldn't set this below
|
||||||
## flush_interval + flush_jitter
|
## interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||||
flush_interval = "10s"
|
flush_interval = "10s"
|
||||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
## Jitter the flush interval by a random amount. This is primarily to avoid
|
||||||
## large write spikes for users running a large number of telegraf instances.
|
## large write spikes for users running a large number of telegraf instances.
|
||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||||
flush_jitter = "0s"
|
flush_jitter = "0s"
|
||||||
|
|
||||||
## By default or when set to "0s", precision will be set to the same
|
## Logging configuration:
|
||||||
## timestamp order as the collection interval, with the maximum being 1s.
|
## Run telegraf in debug mode
|
||||||
## ie, when interval = "10s", precision will be "1s"
|
debug = false
|
||||||
## when interval = "250ms", precision will be "1ms"
|
## Run telegraf in quiet mode
|
||||||
## Precision will NOT be used for service inputs. It is up to each individual
|
quiet = false
|
||||||
## service input to set the timestamp at the appropriate precision.
|
## Specify the log file name. The empty string means to log to stdout.
|
||||||
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
logfile = "/Program Files/Telegraf/telegraf.log"
|
||||||
precision = ""
|
|
||||||
|
|
||||||
## Log at debug level.
|
|
||||||
# debug = false
|
|
||||||
## Log only error level messages.
|
|
||||||
# quiet = false
|
|
||||||
|
|
||||||
## Log target controls the destination for logs and can be one of "file",
|
|
||||||
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
|
|
||||||
## is determined by the "logfile" setting.
|
|
||||||
# logtarget = "file"
|
|
||||||
|
|
||||||
## Name of the file to be logged to when using the "file" logtarget. If set to
|
|
||||||
## the empty string then logs are written to stderr.
|
|
||||||
# logfile = ""
|
|
||||||
|
|
||||||
## The logfile will be rotated after the time interval specified. When set
|
|
||||||
## to 0 no time based rotation is performed. Logs are rotated only when
|
|
||||||
## written to, if there is no log activity rotation may be delayed.
|
|
||||||
# logfile_rotation_interval = "0d"
|
|
||||||
|
|
||||||
## The logfile will be rotated when it becomes larger than the specified
|
|
||||||
## size. When set to 0 no size based rotation is performed.
|
|
||||||
# logfile_rotation_max_size = "0MB"
|
|
||||||
|
|
||||||
## Maximum number of rotated archives to keep, any older logs are deleted.
|
|
||||||
## If set to -1, no archives are removed.
|
|
||||||
# logfile_rotation_max_archives = 5
|
|
||||||
|
|
||||||
## Override default hostname, if empty use os.Hostname()
|
## Override default hostname, if empty use os.Hostname()
|
||||||
hostname = ""
|
hostname = ""
|
||||||
## If set to true, do no set the "host" tag in the telegraf agent.
|
|
||||||
omit_hostname = false
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
# OUTPUT PLUGINS #
|
# OUTPUTS #
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
|
# Configuration for influxdb server to send metrics to
|
||||||
# Configuration for sending metrics to InfluxDB
|
|
||||||
[[outputs.influxdb]]
|
[[outputs.influxdb]]
|
||||||
## The full HTTP or UDP URL for your InfluxDB instance.
|
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
||||||
##
|
# Multiple urls can be specified but it is assumed that they are part of the same
|
||||||
## Multiple URLs can be specified for a single cluster, only ONE of the
|
# cluster, this means that only ONE of the urls will be written to each interval.
|
||||||
## urls will be written to each interval.
|
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
|
||||||
# urls = ["unix:///var/run/influxdb.sock"]
|
urls = ["http://127.0.0.1:8086"] # required
|
||||||
# urls = ["udp://127.0.0.1:8089"]
|
# The target database for metrics (telegraf will create it if not exists)
|
||||||
# urls = ["http://127.0.0.1:8086"]
|
database = "telegraf" # required
|
||||||
|
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
# note: using second precision greatly helps InfluxDB compression
|
||||||
|
precision = "s"
|
||||||
|
|
||||||
## The target database for metrics; will be created as needed.
|
## Write timeout (for the InfluxDB client), formatted as a string.
|
||||||
## For UDP url endpoint database needs to be configured on server side.
|
## If not provided, will default to 5s. 0s means no timeout (not recommended).
|
||||||
# database = "telegraf"
|
timeout = "5s"
|
||||||
|
|
||||||
## The value of this tag will be used to determine the database. If this
|
|
||||||
## tag is not set the 'database' option is used as the default.
|
|
||||||
# database_tag = ""
|
|
||||||
|
|
||||||
## If true, the 'database_tag' will not be included in the written metric.
|
|
||||||
# exclude_database_tag = false
|
|
||||||
|
|
||||||
## If true, no CREATE DATABASE queries will be sent. Set to true when using
|
|
||||||
## Telegraf with a user without permissions to create databases or when the
|
|
||||||
## database already exists.
|
|
||||||
# skip_database_creation = false
|
|
||||||
|
|
||||||
## Name of existing retention policy to write to. Empty string writes to
|
|
||||||
## the default retention policy. Only takes effect when using HTTP.
|
|
||||||
# retention_policy = ""
|
|
||||||
|
|
||||||
## The value of this tag will be used to determine the retention policy. If this
|
|
||||||
## tag is not set the 'retention_policy' option is used as the default.
|
|
||||||
# retention_policy_tag = ""
|
|
||||||
|
|
||||||
## If true, the 'retention_policy_tag' will not be included in the written metric.
|
|
||||||
# exclude_retention_policy_tag = false
|
|
||||||
|
|
||||||
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
|
|
||||||
## Only takes effect when using HTTP.
|
|
||||||
# write_consistency = "any"
|
|
||||||
|
|
||||||
## Timeout for HTTP messages.
|
|
||||||
# timeout = "5s"
|
|
||||||
|
|
||||||
## HTTP Basic Auth
|
|
||||||
# username = "telegraf"
|
# username = "telegraf"
|
||||||
# password = "metricsmetricsmetricsmetrics"
|
# password = "metricsmetricsmetricsmetrics"
|
||||||
|
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
||||||
## HTTP User-Agent
|
|
||||||
# user_agent = "telegraf"
|
# user_agent = "telegraf"
|
||||||
|
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
||||||
## UDP payload size is the maximum packet size to send.
|
# udp_payload = 512
|
||||||
# udp_payload = "512B"
|
|
||||||
|
|
||||||
## Optional TLS Config for use on HTTP connections.
|
|
||||||
# tls_ca = "/etc/telegraf/ca.pem"
|
|
||||||
# tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# tls_key = "/etc/telegraf/key.pem"
|
|
||||||
## Use TLS but skip chain & host verification
|
|
||||||
# insecure_skip_verify = false
|
|
||||||
|
|
||||||
## HTTP Proxy override, if unset values the standard proxy environment
|
|
||||||
## variables are consulted to determine which proxy, if any, should be used.
|
|
||||||
# http_proxy = "http://corporate.proxy:3128"
|
|
||||||
|
|
||||||
## Additional HTTP headers
|
|
||||||
# http_headers = {"X-Special-Header" = "Special-Value"}
|
|
||||||
|
|
||||||
## HTTP Content-Encoding for write request body, can be set to "gzip" to
|
|
||||||
## compress body or "identity" to apply no encoding.
|
|
||||||
# content_encoding = "identity"
|
|
||||||
|
|
||||||
## When true, Telegraf will output unsigned integers as unsigned values,
|
|
||||||
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
|
|
||||||
## integer values. Enabling this option will result in field type errors if
|
|
||||||
## existing data has been written.
|
|
||||||
# influx_uint_support = false
|
|
||||||
|
|
||||||
# # Configuration for sending metrics to InfluxDB
|
|
||||||
# [[outputs.influxdb_v2]]
|
|
||||||
# ## The URLs of the InfluxDB cluster nodes.
|
|
||||||
# ##
|
|
||||||
# ## Multiple URLs can be specified for a single cluster, only ONE of the
|
|
||||||
# ## urls will be written to each interval.
|
|
||||||
# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
|
|
||||||
# urls = ["http://127.0.0.1:9999"]
|
|
||||||
#
|
|
||||||
# ## Token for authentication.
|
|
||||||
# token = ""
|
|
||||||
#
|
|
||||||
# ## Organization is the name of the organization you wish to write to; must exist.
|
|
||||||
# organization = ""
|
|
||||||
#
|
|
||||||
# ## Destination bucket to write into.
|
|
||||||
# bucket = ""
|
|
||||||
#
|
|
||||||
# ## The value of this tag will be used to determine the bucket. If this
|
|
||||||
# ## tag is not set the 'bucket' option is used as the default.
|
|
||||||
# # bucket_tag = ""
|
|
||||||
#
|
|
||||||
# ## If true, the bucket tag will not be added to the metric.
|
|
||||||
# # exclude_bucket_tag = false
|
|
||||||
#
|
|
||||||
# ## Timeout for HTTP messages.
|
|
||||||
# # timeout = "5s"
|
|
||||||
#
|
|
||||||
# ## Additional HTTP headers
|
|
||||||
# # http_headers = {"X-Special-Header" = "Special-Value"}
|
|
||||||
#
|
|
||||||
# ## HTTP Proxy override, if unset values the standard proxy environment
|
|
||||||
# ## variables are consulted to determine which proxy, if any, should be used.
|
|
||||||
# # http_proxy = "http://corporate.proxy:3128"
|
|
||||||
#
|
|
||||||
# ## HTTP User-Agent
|
|
||||||
# # user_agent = "telegraf"
|
|
||||||
#
|
|
||||||
# ## Content-Encoding for write request body, can be set to "gzip" to
|
|
||||||
# ## compress body or "identity" to apply no encoding.
|
|
||||||
# # content_encoding = "gzip"
|
|
||||||
#
|
|
||||||
# ## Enable or disable uint support for writing uints influxdb 2.0.
|
|
||||||
# # influx_uint_support = false
|
|
||||||
#
|
|
||||||
# ## Optional TLS Config for use on HTTP connections.
|
|
||||||
# # tls_ca = "/etc/telegraf/ca.pem"
|
|
||||||
# # tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# # tls_key = "/etc/telegraf/key.pem"
|
|
||||||
# ## Use TLS but skip chain & host verification
|
|
||||||
# # insecure_skip_verify = false
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
# INPUT PLUGINS #
|
# INPUTS #
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
|
|
||||||
# Windows Performance Counters plugin.
|
# Windows Performance Counters plugin.
|
||||||
# These are the recommended method of monitoring system metrics on windows,
|
# These are the recommended method of monitoring system metrics on windows,
|
||||||
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
|
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
|
||||||
|
@ -271,8 +120,8 @@
|
||||||
"% Disk Time",
|
"% Disk Time",
|
||||||
"% Disk Read Time",
|
"% Disk Read Time",
|
||||||
"% Disk Write Time",
|
"% Disk Write Time",
|
||||||
"% Free Space",
|
|
||||||
"Current Disk Queue Length",
|
"Current Disk Queue Length",
|
||||||
|
"% Free Space",
|
||||||
"Free Megabytes",
|
"Free Megabytes",
|
||||||
]
|
]
|
||||||
Measurement = "win_disk"
|
Measurement = "win_disk"
|
||||||
|
@ -338,6 +187,7 @@
|
||||||
"Standby Cache Reserve Bytes",
|
"Standby Cache Reserve Bytes",
|
||||||
"Standby Cache Normal Priority Bytes",
|
"Standby Cache Normal Priority Bytes",
|
||||||
"Standby Cache Core Bytes",
|
"Standby Cache Core Bytes",
|
||||||
|
|
||||||
]
|
]
|
||||||
# Use 6 x - to remove the Instance bit from the query.
|
# Use 6 x - to remove the Instance bit from the query.
|
||||||
Instances = ["------"]
|
Instances = ["------"]
|
||||||
|
@ -355,31 +205,44 @@
|
||||||
Instances = ["_Total"]
|
Instances = ["_Total"]
|
||||||
Measurement = "win_swap"
|
Measurement = "win_swap"
|
||||||
|
|
||||||
|
[[inputs.win_perf_counters.object]]
|
||||||
|
ObjectName = "Network Interface"
|
||||||
|
Instances = ["*"]
|
||||||
|
Counters = [
|
||||||
|
"Bytes Sent/sec",
|
||||||
|
"Bytes Received/sec",
|
||||||
|
"Packets Sent/sec",
|
||||||
|
"Packets Received/sec",
|
||||||
|
"Packets Received Discarded",
|
||||||
|
"Packets Received Errors",
|
||||||
|
"Packets Outbound Discarded",
|
||||||
|
"Packets Outbound Errors",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Windows system plugins using WMI (disabled by default, using
|
# Windows system plugins using WMI (disabled by default, using
|
||||||
# win_perf_counters over WMI is recommended)
|
# win_perf_counters over WMI is recommended)
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about cpu usage
|
# # Read metrics about cpu usage
|
||||||
# [[inputs.cpu]]
|
# [[inputs.cpu]]
|
||||||
# ## Whether to report per-cpu stats or not
|
# ## Whether to report per-cpu stats or not
|
||||||
# percpu = true
|
# percpu = true
|
||||||
# ## Whether to report total system cpu stats or not
|
# ## Whether to report total system cpu stats or not
|
||||||
# totalcpu = true
|
# totalcpu = true
|
||||||
# ## If true, collect raw CPU time metrics.
|
# ## Comment this line if you want the raw CPU time metrics
|
||||||
# collect_cpu_time = false
|
# fielddrop = ["time_*"]
|
||||||
# ## If true, compute and report the sum of all non-idle CPU states.
|
|
||||||
# report_active = false
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about disk usage by mount point
|
# # Read metrics about disk usage by mount point
|
||||||
# [[inputs.disk]]
|
# [[inputs.disk]]
|
||||||
# ## By default stats will be gathered for all mount points.
|
# ## By default, telegraf gather stats for all mountpoints.
|
||||||
# ## Set mount_points will restrict the stats to only the specified mount points.
|
# ## Setting mountpoints will restrict the stats to the specified mountpoints.
|
||||||
# # mount_points = ["/"]
|
# ## mount_points=["/"]
|
||||||
#
|
#
|
||||||
# ## Ignore mount points by filesystem type.
|
# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||||
# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
# ## present on /run, /var/run, /dev/shm or /dev).
|
||||||
|
# # ignore_fs = ["tmpfs", "devtmpfs"]
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about disk IO by device
|
# # Read metrics about disk IO by device
|
||||||
|
@ -387,26 +250,9 @@
|
||||||
# ## By default, telegraf will gather stats for all devices including
|
# ## By default, telegraf will gather stats for all devices including
|
||||||
# ## disk partitions.
|
# ## disk partitions.
|
||||||
# ## Setting devices will restrict the stats to the specified devices.
|
# ## Setting devices will restrict the stats to the specified devices.
|
||||||
# # devices = ["sda", "sdb", "vd*"]
|
# ## devices = ["sda", "sdb"]
|
||||||
# ## Uncomment the following line if you need disk serial numbers.
|
# ## Uncomment the following line if you do not need disk serial numbers.
|
||||||
# # skip_serial_number = false
|
# ## skip_serial_number = true
|
||||||
# #
|
|
||||||
# ## On systems which support it, device metadata can be added in the form of
|
|
||||||
# ## tags.
|
|
||||||
# ## Currently only Linux is supported via udev properties. You can view
|
|
||||||
# ## available properties for a device by running:
|
|
||||||
# ## 'udevadm info -q property -n /dev/sda'
|
|
||||||
# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
|
|
||||||
# #
|
|
||||||
# ## Using the same metadata source as device_tags, you can also customize the
|
|
||||||
# ## name of the device via templates.
|
|
||||||
# ## The 'name_templates' parameter is a list of templates to try and apply to
|
|
||||||
# ## the device. The template may contain variables in the form of '$PROPERTY' or
|
|
||||||
# ## '${PROPERTY}'. The first template which does not contain any variables not
|
|
||||||
# ## present for the device is used as the device name tag.
|
|
||||||
# ## The typical use case is for LVM volumes, to get the VG/LV name instead of
|
|
||||||
# ## the near-meaningless DM-0 name.
|
|
||||||
# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about memory usage
|
# # Read metrics about memory usage
|
||||||
|
@ -417,3 +263,4 @@
|
||||||
# # Read metrics about swap memory usage
|
# # Read metrics about swap memory usage
|
||||||
# [[inputs.swap]]
|
# [[inputs.swap]]
|
||||||
# # no configuration
|
# # no configuration
|
||||||
|
|
||||||
|
|
|
@ -37,24 +37,6 @@ func TestCompile(t *testing.T) {
|
||||||
assert.True(t, f.Match("network"))
|
assert.True(t, f.Match("network"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIncludeExclude(t *testing.T) {
|
|
||||||
tags := []string{}
|
|
||||||
labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"}
|
|
||||||
|
|
||||||
filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to create include/exclude filter - %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range labels {
|
|
||||||
if filter.Match(labels[i]) {
|
|
||||||
tags = append(tags, labels[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, []string{"best", "timeseries", "ever"}, tags)
|
|
||||||
}
|
|
||||||
|
|
||||||
var benchbool bool
|
var benchbool bool
|
||||||
|
|
||||||
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
|
func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
|
||||||
|
|
161
go.mod
161
go.mod
|
@ -1,161 +0,0 @@
|
||||||
module github.com/influxdata/telegraf
|
|
||||||
|
|
||||||
go 1.12
|
|
||||||
|
|
||||||
require (
|
|
||||||
cloud.google.com/go v0.53.0
|
|
||||||
cloud.google.com/go/datastore v1.1.0 // indirect
|
|
||||||
cloud.google.com/go/pubsub v1.2.0
|
|
||||||
code.cloudfoundry.org/clock v1.0.0 // indirect
|
|
||||||
collectd.org v0.3.0
|
|
||||||
github.com/Azure/azure-event-hubs-go/v3 v3.2.0
|
|
||||||
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.3
|
|
||||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
|
|
||||||
github.com/BurntSushi/toml v0.3.1
|
|
||||||
github.com/ChimeraCoder/anaconda v2.0.0+incompatible
|
|
||||||
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7 // indirect
|
|
||||||
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
|
|
||||||
github.com/Microsoft/ApplicationInsights-Go v0.4.2
|
|
||||||
github.com/Microsoft/go-winio v0.4.9 // indirect
|
|
||||||
github.com/Shopify/sarama v1.24.1
|
|
||||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
|
|
||||||
github.com/aerospike/aerospike-client-go v1.27.0
|
|
||||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
|
|
||||||
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9
|
|
||||||
github.com/apache/thrift v0.12.0
|
|
||||||
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect
|
|
||||||
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740
|
|
||||||
github.com/armon/go-metrics v0.3.0 // indirect
|
|
||||||
github.com/aws/aws-sdk-go v1.30.9
|
|
||||||
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330 // indirect
|
|
||||||
github.com/benbjohnson/clock v1.0.2
|
|
||||||
github.com/bitly/go-hostpool v0.1.0 // indirect
|
|
||||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
|
||||||
github.com/caio/go-tdigest v2.3.0+incompatible // indirect
|
|
||||||
github.com/cenkalti/backoff v2.0.0+incompatible // indirect
|
|
||||||
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6
|
|
||||||
github.com/cockroachdb/apd v1.1.0 // indirect
|
|
||||||
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037
|
|
||||||
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect
|
|
||||||
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect
|
|
||||||
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
|
||||||
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect
|
|
||||||
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133
|
|
||||||
github.com/docker/go-connections v0.3.0 // indirect
|
|
||||||
github.com/docker/go-units v0.3.3 // indirect
|
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166
|
|
||||||
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc // indirect
|
|
||||||
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad // indirect
|
|
||||||
github.com/eclipse/paho.mqtt.golang v1.2.0
|
|
||||||
github.com/ericchiang/k8s v1.2.0
|
|
||||||
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 // indirect
|
|
||||||
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
|
|
||||||
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96
|
|
||||||
github.com/go-logfmt/logfmt v0.4.0
|
|
||||||
github.com/go-ole/go-ole v1.2.1 // indirect
|
|
||||||
github.com/go-redis/redis v6.12.0+incompatible
|
|
||||||
github.com/go-sql-driver/mysql v1.5.0
|
|
||||||
github.com/goburrow/modbus v0.1.0
|
|
||||||
github.com/goburrow/serial v0.1.0 // indirect
|
|
||||||
github.com/gobwas/glob v0.2.3
|
|
||||||
github.com/gofrs/uuid v2.1.0+incompatible
|
|
||||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
|
|
||||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
|
|
||||||
github.com/golang/protobuf v1.3.5
|
|
||||||
github.com/google/go-cmp v0.4.0
|
|
||||||
github.com/google/go-github v17.0.0+incompatible
|
|
||||||
github.com/google/go-querystring v1.0.0 // indirect
|
|
||||||
github.com/gorilla/mux v1.6.2
|
|
||||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
|
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
|
||||||
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0
|
|
||||||
github.com/hashicorp/consul v1.2.1
|
|
||||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
|
||||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
|
|
||||||
github.com/hashicorp/memberlist v0.1.5 // indirect
|
|
||||||
github.com/hashicorp/serf v0.8.1 // indirect
|
|
||||||
github.com/influxdata/go-syslog/v2 v2.0.1
|
|
||||||
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41
|
|
||||||
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
|
|
||||||
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
|
|
||||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
|
|
||||||
github.com/jackc/pgx v3.6.0+incompatible
|
|
||||||
github.com/jcmturner/gofork v1.0.0 // indirect
|
|
||||||
github.com/kardianos/service v1.0.0
|
|
||||||
github.com/karrick/godirwalk v1.12.0
|
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
|
||||||
github.com/klauspost/compress v1.9.2 // indirect
|
|
||||||
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee
|
|
||||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
|
||||||
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect
|
|
||||||
github.com/lib/pq v1.3.0 // indirect
|
|
||||||
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
|
||||||
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe
|
|
||||||
github.com/miekg/dns v1.0.14
|
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
|
||||||
github.com/mmcdole/gofeed v1.0.0
|
|
||||||
github.com/multiplay/go-ts3 v1.0.0
|
|
||||||
github.com/naoina/go-stringutil v0.1.0 // indirect
|
|
||||||
github.com/nats-io/nats-server/v2 v2.1.4
|
|
||||||
github.com/nats-io/nats.go v1.9.1
|
|
||||||
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0
|
|
||||||
github.com/nsqio/go-nsq v1.0.7
|
|
||||||
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029
|
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
|
||||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
|
||||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
|
|
||||||
github.com/opentracing/opentracing-go v1.0.2 // indirect
|
|
||||||
github.com/openzipkin/zipkin-go-opentracing v0.3.4
|
|
||||||
github.com/pkg/errors v0.9.1
|
|
||||||
github.com/prometheus/client_golang v1.5.1
|
|
||||||
github.com/prometheus/client_model v0.2.0
|
|
||||||
github.com/prometheus/common v0.9.1
|
|
||||||
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664
|
|
||||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
|
|
||||||
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
|
|
||||||
github.com/shirou/gopsutil v2.20.2+incompatible
|
|
||||||
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect
|
|
||||||
github.com/sirupsen/logrus v1.4.2
|
|
||||||
github.com/soniah/gosnmp v1.25.0
|
|
||||||
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8
|
|
||||||
github.com/stretchr/testify v1.5.1
|
|
||||||
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62
|
|
||||||
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect
|
|
||||||
github.com/tidwall/gjson v1.3.0
|
|
||||||
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect
|
|
||||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
|
|
||||||
github.com/vjeantet/grok v1.0.0
|
|
||||||
github.com/vmware/govmomi v0.19.0
|
|
||||||
github.com/wavefronthq/wavefront-sdk-go v0.9.2
|
|
||||||
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf
|
|
||||||
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect
|
|
||||||
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect
|
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
|
|
||||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
|
||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect
|
|
||||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4
|
|
||||||
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect
|
|
||||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4
|
|
||||||
gonum.org/v1/gonum v0.6.2 // indirect
|
|
||||||
google.golang.org/api v0.20.0
|
|
||||||
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24
|
|
||||||
google.golang.org/grpc v1.28.0
|
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5
|
|
||||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
|
|
||||||
gopkg.in/ldap.v3 v3.1.0
|
|
||||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
|
|
||||||
gopkg.in/olivere/elastic.v5 v5.0.70
|
|
||||||
gopkg.in/yaml.v2 v2.2.5
|
|
||||||
gotest.tools v2.2.0+incompatible // indirect
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3 // indirect
|
|
||||||
k8s.io/apimachinery v0.17.1 // indirect
|
|
||||||
)
|
|
||||||
|
|
||||||
// replaced due to https://github.com/satori/go.uuid/issues/73
|
|
||||||
replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible
|
|
915
go.sum
915
go.sum
|
@ -1,915 +0,0 @@
|
||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
|
||||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
|
||||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
|
||||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
|
||||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
|
||||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
|
||||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
|
||||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
|
||||||
cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
|
|
||||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
|
||||||
cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
|
|
||||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
|
||||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
|
||||||
cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
|
|
||||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
|
||||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
|
||||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
|
||||||
cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680=
|
|
||||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
|
||||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
|
||||||
cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
|
|
||||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
|
||||||
code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
|
|
||||||
code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
|
|
||||||
collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00=
|
|
||||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
|
||||||
gitea.statsd.de/dom/telegraf v0.10.1 h1:RZNof67areTIGhj1hZW1cAZ/4Dbz7HyKVAZ5dTphbuw=
|
|
||||||
github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc=
|
|
||||||
github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg=
|
|
||||||
github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs=
|
|
||||||
github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc=
|
|
||||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
|
||||||
github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg=
|
|
||||||
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
|
||||||
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68=
|
|
||||||
github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
|
||||||
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
|
|
||||||
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ=
|
|
||||||
github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
|
|
||||||
github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY=
|
|
||||||
github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
|
|
||||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk=
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
|
||||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
|
|
||||||
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
|
|
||||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
|
|
||||||
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
|
|
||||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
|
||||||
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
|
|
||||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
|
||||||
github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=
|
|
||||||
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
|
|
||||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
|
||||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
|
||||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
|
||||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
|
||||||
github.com/ChimeraCoder/anaconda v1.0.0 h1:B7KZV+CE2iwbC15sh+rh5vaWs4+XJx1XC4iHvHtsZrQ=
|
|
||||||
github.com/ChimeraCoder/anaconda v2.0.0+incompatible h1:F0eD7CHXieZ+VLboCD5UAqCeAzJZxcr90zSCcuJopJs=
|
|
||||||
github.com/ChimeraCoder/anaconda v2.0.0+incompatible/go.mod h1:TCt3MijIq3Qqo9SBtuW/rrM4x7rDfWqYWHj8T7hLcLg=
|
|
||||||
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7 h1:r+EmXjfPosKO4wfiMLe1XQictsIlhErTufbWUsjOTZs=
|
|
||||||
github.com/ChimeraCoder/tokenbucket v0.0.0-20131201223612-c5a927568de7/go.mod h1:b2EuEMLSG9q3bZ95ql1+8oVqzzrTNSiOQqSXWFBzxeI=
|
|
||||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
|
||||||
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w=
|
|
||||||
github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM=
|
|
||||||
github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg=
|
|
||||||
github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg=
|
|
||||||
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
|
|
||||||
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
|
||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
|
||||||
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
|
|
||||||
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
|
|
||||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
|
||||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
|
||||||
github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=
|
|
||||||
github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU=
|
|
||||||
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
|
|
||||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
|
||||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
|
||||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
|
||||||
github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE=
|
|
||||||
github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
|
|
||||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
|
||||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
|
||||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
|
|
||||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
|
||||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
|
|
||||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
|
||||||
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ=
|
|
||||||
github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc=
|
|
||||||
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
|
|
||||||
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
|
|
||||||
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
|
|
||||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
|
||||||
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos=
|
|
||||||
github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
|
|
||||||
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY=
|
|
||||||
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
|
||||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
|
||||||
github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
|
|
||||||
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
|
|
||||||
github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to=
|
|
||||||
github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
|
||||||
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330 h1:ekDALXAVvY/Ub1UtNta3inKQwZ/jMB/zpOtD8rAYh78=
|
|
||||||
github.com/azr/backoff v0.0.0-20160115115103-53511d3c7330/go.mod h1:nH+k0SvAt3HeiYyOlJpLLv1HG1p7KWP7qU9QPp2/pCo=
|
|
||||||
github.com/benbjohnson/clock v1.0.2 h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE=
|
|
||||||
github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
|
||||||
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
|
|
||||||
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
|
|
||||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
|
|
||||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
|
||||||
github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY=
|
|
||||||
github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
|
|
||||||
github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
|
|
||||||
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
|
||||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
|
||||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
|
||||||
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo=
|
|
||||||
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
|
||||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
|
||||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
|
||||||
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
|
|
||||||
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ=
|
|
||||||
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
|
|
||||||
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4=
|
|
||||||
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
|
|
||||||
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
|
|
||||||
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
|
|
||||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o=
|
|
||||||
github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
|
||||||
github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
|
|
||||||
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
|
||||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
|
||||||
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
|
|
||||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
|
||||||
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg=
|
|
||||||
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
|
||||||
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 h1:Kus8nU6ctI/u/l86ljUJl6GpUtmO7gtD/krn4u5dr0M=
|
|
||||||
github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
|
||||||
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
|
|
||||||
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
|
||||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
|
||||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ=
|
|
||||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
|
||||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
|
||||||
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc h1:tP7tkU+vIsEOKiK+l/NSLN4uUtkyuxc6hgYpQeCWAeI=
|
|
||||||
github.com/dustin/go-jsonpointer v0.0.0-20160814072949-ba0abeacc3dc/go.mod h1:ORH5Qp2bskd9NzSfKqAF7tKfONsEkCarTE5ESr/RVBw=
|
|
||||||
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad h1:Qk76DOWdOp+GlyDKBAG3Klr9cn7N+LcYc82AZ2S7+cA=
|
|
||||||
github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad/go.mod h1:mPKfmRa823oBIgl2r20LeMSpTAteW5j7FLkc0vjmzyQ=
|
|
||||||
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
|
|
||||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
|
||||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
|
|
||||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
|
||||||
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
|
|
||||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
|
||||||
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
|
|
||||||
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
|
||||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
|
||||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
|
||||||
github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
|
|
||||||
github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4=
|
|
||||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
|
||||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
|
||||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
|
||||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
|
||||||
github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
|
|
||||||
github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17 h1:GOfMz6cRgTJ9jWV0qAezv642OhPnKEG7gtUjJSdStHE=
|
|
||||||
github.com/garyburd/go-oauth v0.0.0-20180319155456-bca2e7f09a17/go.mod h1:HfkOCN6fkKKaPSAeNq/er3xObxTW4VLeY6UUK895gLQ=
|
|
||||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
|
||||||
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
|
|
||||||
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
|
|
||||||
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8=
|
|
||||||
github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o=
|
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
|
||||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
|
||||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
|
||||||
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
|
|
||||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
|
||||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
|
||||||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
|
||||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
|
||||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
|
||||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
|
||||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
|
||||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
|
||||||
github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE=
|
|
||||||
github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
|
||||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
|
||||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
|
||||||
github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro=
|
|
||||||
github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg=
|
|
||||||
github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA=
|
|
||||||
github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA=
|
|
||||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
|
||||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
|
||||||
github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA=
|
|
||||||
github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
|
||||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
|
||||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
|
||||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
|
||||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
|
||||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
|
||||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
|
||||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
|
|
||||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
|
||||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
|
||||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
|
|
||||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
|
||||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
|
||||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
|
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
|
||||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
|
||||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
|
||||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
|
||||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
|
||||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
|
||||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
|
||||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
|
||||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
|
||||||
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
|
|
||||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
|
||||||
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
|
|
||||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
|
||||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
|
|
||||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
|
||||||
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
|
|
||||||
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
|
|
||||||
github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg=
|
|
||||||
github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
|
|
||||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
|
|
||||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
|
||||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
|
||||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
|
||||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
|
||||||
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
|
|
||||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
|
||||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
|
||||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
|
||||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
|
||||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
|
|
||||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
|
|
||||||
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
|
|
||||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
|
||||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
|
||||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
|
||||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=
|
|
||||||
github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
|
||||||
github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4=
|
|
||||||
github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
|
|
||||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s=
|
|
||||||
github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo=
|
|
||||||
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 h1:HxQo1NpNXQDpvEBzthbQLmePvTLFTa5GzSFUjL03aEs=
|
|
||||||
github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41/go.mod h1:xTFF2SILpIYc5N+Srb0d5qpx7d+f733nBrbasb13DtQ=
|
|
||||||
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
|
|
||||||
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8=
|
|
||||||
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q=
|
|
||||||
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI=
|
|
||||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
|
|
||||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
|
||||||
github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
|
|
||||||
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
|
||||||
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
|
||||||
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
|
|
||||||
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
|
||||||
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
|
|
||||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
|
||||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
|
||||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
|
||||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
|
|
||||||
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
|
|
||||||
github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
|
|
||||||
github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw=
|
|
||||||
github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
|
|
||||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
|
||||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
|
||||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
|
||||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
|
||||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
|
||||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
|
||||||
github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
|
|
||||||
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
|
|
||||||
github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y=
|
|
||||||
github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
|
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
|
||||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
|
||||||
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
|
|
||||||
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
|
||||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk=
|
|
||||||
github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s=
|
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
|
||||||
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
|
|
||||||
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
|
|
||||||
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg=
|
|
||||||
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
|
|
||||||
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
|
|
||||||
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
|
||||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
|
||||||
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA=
|
|
||||||
github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
|
||||||
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk=
|
|
||||||
github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
|
|
||||||
github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0=
|
|
||||||
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
|
|
||||||
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
|
|
||||||
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
|
|
||||||
github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg=
|
|
||||||
github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
|
|
||||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
|
||||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
|
||||||
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws=
|
|
||||||
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
|
||||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
|
||||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
|
||||||
github.com/mmcdole/gofeed v1.0.0 h1:PHqwr8fsEm8xarj9s53XeEAFYhRM3E9Ib7Ie766/LTE=
|
|
||||||
github.com/mmcdole/gofeed v1.0.0/go.mod h1:tkVcyzS3qVMlQrQxJoEH1hkTiuo9a8emDzkMi7TZBu0=
|
|
||||||
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf h1:sWGE2v+hO0Nd4yFU/S/mDBM5plIU8v/Qhfz41hkDIAI=
|
|
||||||
github.com/mmcdole/goxpp v0.0.0-20181012175147-0068e33feabf/go.mod h1:pasqhqstspkosTneA62Nc+2p9SOBBYAPbnmRRWPQ0V8=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
|
||||||
github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw=
|
|
||||||
github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
|
||||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
|
||||||
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
|
|
||||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
|
||||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
|
||||||
github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
|
|
||||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
|
||||||
github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g=
|
|
||||||
github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg=
|
|
||||||
github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
|
|
||||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
|
||||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
|
||||||
github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
|
|
||||||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
|
||||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
|
||||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
|
||||||
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY=
|
|
||||||
github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ=
|
|
||||||
github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
|
|
||||||
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
|
|
||||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
|
||||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
|
||||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
|
||||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
|
||||||
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w=
|
|
||||||
github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
|
||||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
|
||||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
|
||||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
|
|
||||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
|
||||||
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
|
|
||||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
|
||||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
|
||||||
github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g=
|
|
||||||
github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE=
|
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
|
||||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
|
||||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
|
||||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
|
||||||
github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
|
|
||||||
github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
|
||||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
|
||||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
|
|
||||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
|
||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
|
||||||
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
|
|
||||||
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
|
||||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
|
||||||
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
|
|
||||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
|
||||||
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
|
|
||||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
|
|
||||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
|
||||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
|
||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
|
|
||||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
|
||||||
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk=
|
|
||||||
github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
|
||||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY=
|
|
||||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
|
||||||
github.com/shirou/gopsutil v2.20.2+incompatible h1:ucK79BhBpgqQxPASyS2cu9HX8cfDVljBN1WWFvbNvgY=
|
|
||||||
github.com/shirou/gopsutil v2.20.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
|
||||||
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A=
|
|
||||||
github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
|
||||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
|
||||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
|
||||||
github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ=
|
|
||||||
github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ=
|
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ=
|
|
||||||
github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
|
||||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
|
||||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
|
||||||
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o=
|
|
||||||
github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
|
|
||||||
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=
|
|
||||||
github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0=
|
|
||||||
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
|
|
||||||
github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
|
|
||||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
|
||||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
|
||||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
|
||||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
|
||||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
|
||||||
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0=
|
|
||||||
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
|
||||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
|
|
||||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
|
||||||
github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ=
|
|
||||||
github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo=
|
|
||||||
github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY=
|
|
||||||
github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
|
||||||
github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk=
|
|
||||||
github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU=
|
|
||||||
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q=
|
|
||||||
github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg=
|
|
||||||
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk=
|
|
||||||
github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4=
|
|
||||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
|
||||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk=
|
|
||||||
github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
|
|
||||||
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
|
|
||||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
|
|
||||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w=
|
|
||||||
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU=
|
|
||||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
|
||||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
|
||||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
|
||||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
|
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
|
||||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
|
||||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
|
||||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
|
||||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
|
||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
|
|
||||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
|
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
|
|
||||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
|
|
||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo=
|
|
||||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM=
|
|
||||||
golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8=
|
|
||||||
golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4=
|
|
||||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc=
|
|
||||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c=
|
|
||||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
|
||||||
gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q=
|
|
||||||
gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
|
||||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
|
|
||||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
|
||||||
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
|
||||||
google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
|
|
||||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
|
||||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
|
||||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
|
|
||||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
|
||||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
|
||||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
|
|
||||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
|
||||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE=
|
|
||||||
google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
|
||||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
|
||||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
|
|
||||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
|
||||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
|
|
||||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
|
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
|
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
|
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
|
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
|
||||||
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
|
|
||||||
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
|
|
||||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
|
|
||||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
|
|
||||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
|
|
||||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
|
||||||
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
|
||||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
|
|
||||||
gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
|
||||||
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
|
|
||||||
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
|
|
||||||
gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE=
|
|
||||||
gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ=
|
|
||||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
|
|
||||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
|
||||||
gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE=
|
|
||||||
gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
|
||||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
|
||||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
|
||||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM=
|
|
||||||
k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
|
||||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
|
||||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
|
||||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
|
||||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
|
||||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
|
||||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
|
||||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
|
19
input.go
19
input.go
|
@ -1,7 +1,11 @@
|
||||||
package telegraf
|
package telegraf
|
||||||
|
|
||||||
type Input interface {
|
type Input interface {
|
||||||
PluginDescriber
|
// SampleConfig returns the default configuration of the Input
|
||||||
|
SampleConfig() string
|
||||||
|
|
||||||
|
// Description returns a one-sentence description on the Input
|
||||||
|
Description() string
|
||||||
|
|
||||||
// Gather takes in an accumulator and adds the metrics that the Input
|
// Gather takes in an accumulator and adds the metrics that the Input
|
||||||
// gathers. This is called every "interval"
|
// gathers. This is called every "interval"
|
||||||
|
@ -9,10 +13,17 @@ type Input interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
type ServiceInput interface {
|
type ServiceInput interface {
|
||||||
Input
|
// SampleConfig returns the default configuration of the Input
|
||||||
|
SampleConfig() string
|
||||||
|
|
||||||
// Start the ServiceInput. The Accumulator may be retained and used until
|
// Description returns a one-sentence description on the Input
|
||||||
// Stop returns.
|
Description() string
|
||||||
|
|
||||||
|
// Gather takes in an accumulator and adds the metrics that the Input
|
||||||
|
// gathers. This is called every "interval"
|
||||||
|
Gather(Accumulator) error
|
||||||
|
|
||||||
|
// Start starts the ServiceInput's service, whatever that may be
|
||||||
Start(Accumulator) error
|
Start(Accumulator) error
|
||||||
|
|
||||||
// Stop stops the services and closes any necessary channels and connections
|
// Stop stops the services and closes any necessary channels and connections
|
||||||
|
|
|
@ -0,0 +1,76 @@
|
||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/selfstat"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{})
|
||||||
|
MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{})
|
||||||
|
)
|
||||||
|
|
||||||
|
// Buffer is an object for storing metrics in a circular buffer.
|
||||||
|
type Buffer struct {
|
||||||
|
buf chan telegraf.Metric
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuffer returns a Buffer
|
||||||
|
// size is the maximum number of metrics that Buffer will cache. If Add is
|
||||||
|
// called when the buffer is full, then the oldest metric(s) will be dropped.
|
||||||
|
func NewBuffer(size int) *Buffer {
|
||||||
|
return &Buffer{
|
||||||
|
buf: make(chan telegraf.Metric, size),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty returns true if Buffer is empty.
|
||||||
|
func (b *Buffer) IsEmpty() bool {
|
||||||
|
return len(b.buf) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the current length of the buffer.
|
||||||
|
func (b *Buffer) Len() int {
|
||||||
|
return len(b.buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds metrics to the buffer.
|
||||||
|
func (b *Buffer) Add(metrics ...telegraf.Metric) {
|
||||||
|
for i, _ := range metrics {
|
||||||
|
MetricsWritten.Incr(1)
|
||||||
|
select {
|
||||||
|
case b.buf <- metrics[i]:
|
||||||
|
default:
|
||||||
|
b.mu.Lock()
|
||||||
|
MetricsDropped.Incr(1)
|
||||||
|
<-b.buf
|
||||||
|
b.buf <- metrics[i]
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch returns a batch of metrics of size batchSize.
|
||||||
|
// the batch will be of maximum length batchSize. It can be less than batchSize,
|
||||||
|
// if the length of Buffer is less than batchSize.
|
||||||
|
func (b *Buffer) Batch(batchSize int) []telegraf.Metric {
|
||||||
|
b.mu.Lock()
|
||||||
|
n := min(len(b.buf), batchSize)
|
||||||
|
out := make([]telegraf.Metric, n)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
out[i] = <-b.buf
|
||||||
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if b < a {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
|
@ -0,0 +1,100 @@
|
||||||
|
package buffer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
var metricList = []telegraf.Metric{
|
||||||
|
testutil.TestMetric(2, "mymetric1"),
|
||||||
|
testutil.TestMetric(1, "mymetric2"),
|
||||||
|
testutil.TestMetric(11, "mymetric3"),
|
||||||
|
testutil.TestMetric(15, "mymetric4"),
|
||||||
|
testutil.TestMetric(8, "mymetric5"),
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAddMetrics(b *testing.B) {
|
||||||
|
buf := NewBuffer(10000)
|
||||||
|
m := testutil.TestMetric(1, "mymetric")
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
buf.Add(m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewBufferBasicFuncs(t *testing.T) {
|
||||||
|
b := NewBuffer(10)
|
||||||
|
MetricsDropped.Set(0)
|
||||||
|
MetricsWritten.Set(0)
|
||||||
|
|
||||||
|
assert.True(t, b.IsEmpty())
|
||||||
|
assert.Zero(t, b.Len())
|
||||||
|
assert.Zero(t, MetricsDropped.Get())
|
||||||
|
assert.Zero(t, MetricsWritten.Get())
|
||||||
|
|
||||||
|
m := testutil.TestMetric(1, "mymetric")
|
||||||
|
b.Add(m)
|
||||||
|
assert.False(t, b.IsEmpty())
|
||||||
|
assert.Equal(t, b.Len(), 1)
|
||||||
|
assert.Equal(t, int64(0), MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(1), MetricsWritten.Get())
|
||||||
|
|
||||||
|
b.Add(metricList...)
|
||||||
|
assert.False(t, b.IsEmpty())
|
||||||
|
assert.Equal(t, b.Len(), 6)
|
||||||
|
assert.Equal(t, int64(0), MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(6), MetricsWritten.Get())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDroppingMetrics(t *testing.T) {
|
||||||
|
b := NewBuffer(10)
|
||||||
|
MetricsDropped.Set(0)
|
||||||
|
MetricsWritten.Set(0)
|
||||||
|
|
||||||
|
// Add up to the size of the buffer
|
||||||
|
b.Add(metricList...)
|
||||||
|
b.Add(metricList...)
|
||||||
|
assert.False(t, b.IsEmpty())
|
||||||
|
assert.Equal(t, b.Len(), 10)
|
||||||
|
assert.Equal(t, int64(0), MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(10), MetricsWritten.Get())
|
||||||
|
|
||||||
|
// Add 5 more and verify they were dropped
|
||||||
|
b.Add(metricList...)
|
||||||
|
assert.False(t, b.IsEmpty())
|
||||||
|
assert.Equal(t, b.Len(), 10)
|
||||||
|
assert.Equal(t, int64(5), MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(15), MetricsWritten.Get())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGettingBatches(t *testing.T) {
|
||||||
|
b := NewBuffer(20)
|
||||||
|
MetricsDropped.Set(0)
|
||||||
|
MetricsWritten.Set(0)
|
||||||
|
|
||||||
|
// Verify that the buffer returned is smaller than requested when there are
|
||||||
|
// not as many items as requested.
|
||||||
|
b.Add(metricList...)
|
||||||
|
batch := b.Batch(10)
|
||||||
|
assert.Len(t, batch, 5)
|
||||||
|
|
||||||
|
// Verify that the buffer is now empty
|
||||||
|
assert.True(t, b.IsEmpty())
|
||||||
|
assert.Zero(t, b.Len())
|
||||||
|
assert.Zero(t, MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(5), MetricsWritten.Get())
|
||||||
|
|
||||||
|
// Verify that the buffer returned is not more than the size requested
|
||||||
|
b.Add(metricList...)
|
||||||
|
batch = b.Batch(3)
|
||||||
|
assert.Len(t, batch, 3)
|
||||||
|
|
||||||
|
// Verify that buffer is not empty
|
||||||
|
assert.False(t, b.IsEmpty())
|
||||||
|
assert.Equal(t, b.Len(), 2)
|
||||||
|
assert.Equal(t, int64(0), MetricsDropped.Get())
|
||||||
|
assert.Equal(t, int64(10), MetricsWritten.Get())
|
||||||
|
}
|
|
@ -1,36 +0,0 @@
|
||||||
// Package choice provides basic functions for working with
|
|
||||||
// plugin options that must be one of several values.
|
|
||||||
package choice
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// Contains return true if the choice in the list of choices.
|
|
||||||
func Contains(choice string, choices []string) bool {
|
|
||||||
for _, item := range choices {
|
|
||||||
if item == choice {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckSContains returns an error if a choice is not one of
|
|
||||||
// the available choices.
|
|
||||||
func Check(choice string, available []string) error {
|
|
||||||
if !Contains(choice, available) {
|
|
||||||
return fmt.Errorf("unknown choice %s", choice)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckSliceContains returns an error if the choices is not a subset of
|
|
||||||
// available.
|
|
||||||
func CheckSlice(choices, available []string) error {
|
|
||||||
for _, choice := range choices {
|
|
||||||
err := Check(choice, available)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -16,7 +16,6 @@ type CredentialConfig struct {
|
||||||
Profile string
|
Profile string
|
||||||
Filename string
|
Filename string
|
||||||
Token string
|
Token string
|
||||||
EndpointURL string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *CredentialConfig) Credentials() client.ConfigProvider {
|
func (c *CredentialConfig) Credentials() client.ConfigProvider {
|
||||||
|
@ -30,7 +29,6 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider {
|
||||||
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
|
func (c *CredentialConfig) rootCredentials() client.ConfigProvider {
|
||||||
config := &aws.Config{
|
config := &aws.Config{
|
||||||
Region: aws.String(c.Region),
|
Region: aws.String(c.Region),
|
||||||
Endpoint: &c.EndpointURL,
|
|
||||||
}
|
}
|
||||||
if c.AccessKey != "" || c.SecretKey != "" {
|
if c.AccessKey != "" || c.SecretKey != "" {
|
||||||
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
|
config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
|
||||||
|
@ -45,7 +43,6 @@ func (c *CredentialConfig) assumeCredentials() client.ConfigProvider {
|
||||||
rootCredentials := c.rootCredentials()
|
rootCredentials := c.rootCredentials()
|
||||||
config := &aws.Config{
|
config := &aws.Config{
|
||||||
Region: aws.String(c.Region),
|
Region: aws.String(c.Region),
|
||||||
Endpoint: &c.EndpointURL,
|
|
||||||
}
|
}
|
||||||
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
|
config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN)
|
||||||
return session.New(config)
|
return session.New(config)
|
File diff suppressed because it is too large
Load Diff
|
@ -5,17 +5,14 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/internal"
|
"github.com/influxdata/telegraf/internal/models"
|
||||||
"github.com/influxdata/telegraf/models"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs"
|
"github.com/influxdata/telegraf/plugins/inputs"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/exec"
|
"github.com/influxdata/telegraf/plugins/inputs/exec"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/http_listener_v2"
|
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/memcached"
|
"github.com/influxdata/telegraf/plugins/inputs/memcached"
|
||||||
"github.com/influxdata/telegraf/plugins/inputs/procstat"
|
"github.com/influxdata/telegraf/plugins/inputs/procstat"
|
||||||
httpOut "github.com/influxdata/telegraf/plugins/outputs/http"
|
|
||||||
"github.com/influxdata/telegraf/plugins/parsers"
|
"github.com/influxdata/telegraf/plugins/parsers"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
|
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
|
||||||
|
@ -31,17 +28,17 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
|
||||||
|
|
||||||
filter := models.Filter{
|
filter := models.Filter{
|
||||||
NameDrop: []string{"metricname2"},
|
NameDrop: []string{"metricname2"},
|
||||||
NamePass: []string{"metricname1", "ip_192.168.1.1_name"},
|
NamePass: []string{"metricname1"},
|
||||||
FieldDrop: []string{"other", "stuff"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
FieldPass: []string{"some", "strings"},
|
FieldPass: []string{"some", "strings"},
|
||||||
TagDrop: []models.TagFilter{
|
TagDrop: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "badtag",
|
Name: "badtag",
|
||||||
Filter: []string{"othertag"},
|
Filter: []string{"othertag"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagPass: []models.TagFilter{
|
TagPass: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "goodtag",
|
Name: "goodtag",
|
||||||
Filter: []string{"mytag"},
|
Filter: []string{"mytag"},
|
||||||
},
|
},
|
||||||
|
@ -74,13 +71,13 @@ func TestConfig_LoadSingleInput(t *testing.T) {
|
||||||
FieldDrop: []string{"other", "stuff"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
FieldPass: []string{"some", "strings"},
|
FieldPass: []string{"some", "strings"},
|
||||||
TagDrop: []models.TagFilter{
|
TagDrop: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "badtag",
|
Name: "badtag",
|
||||||
Filter: []string{"othertag"},
|
Filter: []string{"othertag"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagPass: []models.TagFilter{
|
TagPass: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "goodtag",
|
Name: "goodtag",
|
||||||
Filter: []string{"mytag"},
|
Filter: []string{"mytag"},
|
||||||
},
|
},
|
||||||
|
@ -120,13 +117,13 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||||
FieldDrop: []string{"other", "stuff"},
|
FieldDrop: []string{"other", "stuff"},
|
||||||
FieldPass: []string{"some", "strings"},
|
FieldPass: []string{"some", "strings"},
|
||||||
TagDrop: []models.TagFilter{
|
TagDrop: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "badtag",
|
Name: "badtag",
|
||||||
Filter: []string{"othertag"},
|
Filter: []string{"othertag"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagPass: []models.TagFilter{
|
TagPass: []models.TagFilter{
|
||||||
{
|
models.TagFilter{
|
||||||
Name: "goodtag",
|
Name: "goodtag",
|
||||||
Filter: []string{"mytag"},
|
Filter: []string{"mytag"},
|
||||||
},
|
},
|
||||||
|
@ -146,11 +143,7 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||||
"Testdata did not produce correct memcached metadata.")
|
"Testdata did not produce correct memcached metadata.")
|
||||||
|
|
||||||
ex := inputs.Inputs["exec"]().(*exec.Exec)
|
ex := inputs.Inputs["exec"]().(*exec.Exec)
|
||||||
p, err := parsers.NewParser(&parsers.Config{
|
p, err := parsers.NewJSONParser("exec", nil, nil)
|
||||||
MetricName: "exec",
|
|
||||||
DataFormat: "json",
|
|
||||||
JSONStrict: true,
|
|
||||||
})
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
ex.SetParser(p)
|
ex.SetParser(p)
|
||||||
ex.Command = "/usr/bin/myothercollector --foo=bar"
|
ex.Command = "/usr/bin/myothercollector --foo=bar"
|
||||||
|
@ -159,11 +152,6 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||||
MeasurementSuffix: "_myothercollector",
|
MeasurementSuffix: "_myothercollector",
|
||||||
}
|
}
|
||||||
eConfig.Tags = make(map[string]string)
|
eConfig.Tags = make(map[string]string)
|
||||||
|
|
||||||
exec := c.Inputs[1].Input.(*exec.Exec)
|
|
||||||
require.NotNil(t, exec.Log)
|
|
||||||
exec.Log = nil
|
|
||||||
|
|
||||||
assert.Equal(t, ex, c.Inputs[1].Input,
|
assert.Equal(t, ex, c.Inputs[1].Input,
|
||||||
"Merged Testdata did not produce a correct exec struct.")
|
"Merged Testdata did not produce a correct exec struct.")
|
||||||
assert.Equal(t, eConfig, c.Inputs[1].Config,
|
assert.Equal(t, eConfig, c.Inputs[1].Config,
|
||||||
|
@ -186,74 +174,3 @@ func TestConfig_LoadDirectory(t *testing.T) {
|
||||||
assert.Equal(t, pConfig, c.Inputs[3].Config,
|
assert.Equal(t, pConfig, c.Inputs[3].Config,
|
||||||
"Merged Testdata did not produce correct procstat metadata.")
|
"Merged Testdata did not produce correct procstat metadata.")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConfig_LoadSpecialTypes(t *testing.T) {
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/special_types.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(c.Inputs))
|
|
||||||
|
|
||||||
inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2)
|
|
||||||
assert.Equal(t, true, ok)
|
|
||||||
// Tests telegraf duration parsing.
|
|
||||||
assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout)
|
|
||||||
// Tests telegraf size parsing.
|
|
||||||
assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize)
|
|
||||||
// Tests toml multiline basic strings.
|
|
||||||
assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_FieldNotDefined(t *testing.T) {
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/invalid_field.toml")
|
|
||||||
require.Error(t, err, "invalid field name")
|
|
||||||
assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error())
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_WrongFieldType(t *testing.T) {
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/wrong_field_type.toml")
|
|
||||||
require.Error(t, err, "invalid field type")
|
|
||||||
assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error())
|
|
||||||
|
|
||||||
c = NewConfig()
|
|
||||||
err = c.LoadConfig("./testdata/wrong_field_type2.toml")
|
|
||||||
require.Error(t, err, "invalid field type2")
|
|
||||||
assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_InlineTables(t *testing.T) {
|
|
||||||
// #4098
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/inline_table.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
require.Equal(t, 2, len(c.Outputs))
|
|
||||||
|
|
||||||
outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP)
|
|
||||||
assert.Equal(t, true, ok)
|
|
||||||
assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers)
|
|
||||||
assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_SliceComment(t *testing.T) {
|
|
||||||
t.Skipf("Skipping until #3642 is resolved")
|
|
||||||
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/slice_comment.toml")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(c.Outputs))
|
|
||||||
|
|
||||||
outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP)
|
|
||||||
assert.Equal(t, []string{"test"}, outputHTTP.Scopes)
|
|
||||||
assert.Equal(t, true, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_BadOrdering(t *testing.T) {
|
|
||||||
// #3444: when not using inline tables, care has to be taken so subsequent configuration
|
|
||||||
// doesn't become part of the table. This is not a bug, but TOML syntax.
|
|
||||||
c := NewConfig()
|
|
||||||
err := c.LoadConfig("./testdata/non_slice_slice.toml")
|
|
||||||
require.Error(t, err, "bad ordering")
|
|
||||||
assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error())
|
|
||||||
}
|
|
|
@ -1,6 +1,6 @@
|
||||||
[[inputs.memcached]]
|
[[inputs.memcached]]
|
||||||
servers = ["$MY_TEST_SERVER"]
|
servers = ["$MY_TEST_SERVER"]
|
||||||
namepass = ["metricname1", "ip_${MY_TEST_SERVER}_name"]
|
namepass = ["metricname1"]
|
||||||
namedrop = ["metricname2"]
|
namedrop = ["metricname2"]
|
||||||
fieldpass = ["some", "strings"]
|
fieldpass = ["some", "strings"]
|
||||||
fielddrop = ["other", "stuff"]
|
fielddrop = ["other", "stuff"]
|
|
@ -256,7 +256,7 @@
|
||||||
# specify address via a url matching:
|
# specify address via a url matching:
|
||||||
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
|
||||||
# or a simple string:
|
# or a simple string:
|
||||||
# host=localhost user=pqgotest password=... sslmode=... dbname=app_production
|
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
|
||||||
#
|
#
|
||||||
# All connection parameters are optional. By default, the host is localhost
|
# All connection parameters are optional. By default, the host is localhost
|
||||||
# and the user is the currently running user. For localhost, we default
|
# and the user is the currently running user. For localhost, we default
|
|
@ -1,182 +0,0 @@
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewStreamContentDecoder returns a reader that will decode the stream
|
|
||||||
// according to the encoding type.
|
|
||||||
func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) {
|
|
||||||
switch encoding {
|
|
||||||
case "gzip":
|
|
||||||
return NewGzipReader(r)
|
|
||||||
case "identity", "":
|
|
||||||
return r, nil
|
|
||||||
default:
|
|
||||||
return nil, errors.New("invalid value for content_encoding")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read.
|
|
||||||
type GzipReader struct {
|
|
||||||
r io.Reader
|
|
||||||
z *gzip.Reader
|
|
||||||
endOfStream bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewGzipReader(r io.Reader) (io.Reader, error) {
|
|
||||||
// We need a read that implements ByteReader in order to line up the next
|
|
||||||
// stream.
|
|
||||||
br := bufio.NewReader(r)
|
|
||||||
|
|
||||||
// Reads the first gzip stream header.
|
|
||||||
z, err := gzip.NewReader(br)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prevent future calls to Read from reading the following gzip header.
|
|
||||||
z.Multistream(false)
|
|
||||||
|
|
||||||
return &GzipReader{r: br, z: z}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *GzipReader) Read(b []byte) (int, error) {
|
|
||||||
if r.endOfStream {
|
|
||||||
// Reads the next gzip header and prepares for the next stream.
|
|
||||||
err := r.z.Reset(r.r)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
r.z.Multistream(false)
|
|
||||||
r.endOfStream = false
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := r.z.Read(b)
|
|
||||||
|
|
||||||
// Since multistream is disabled, io.EOF indicates the end of the gzip
|
|
||||||
// sequence. On the next read we must read the next gzip header.
|
|
||||||
if err == io.EOF {
|
|
||||||
r.endOfStream = true
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContentEncoder returns a ContentEncoder for the encoding type.
|
|
||||||
func NewContentEncoder(encoding string) (ContentEncoder, error) {
|
|
||||||
switch encoding {
|
|
||||||
case "gzip":
|
|
||||||
return NewGzipEncoder()
|
|
||||||
case "identity", "":
|
|
||||||
return NewIdentityEncoder(), nil
|
|
||||||
default:
|
|
||||||
return nil, errors.New("invalid value for content_encoding")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContentDecoder returns a ContentDecoder for the encoding type.
|
|
||||||
func NewContentDecoder(encoding string) (ContentDecoder, error) {
|
|
||||||
switch encoding {
|
|
||||||
case "gzip":
|
|
||||||
return NewGzipDecoder()
|
|
||||||
case "identity", "":
|
|
||||||
return NewIdentityDecoder(), nil
|
|
||||||
default:
|
|
||||||
return nil, errors.New("invalid value for content_encoding")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContentEncoder applies a wrapper encoding to byte buffers.
|
|
||||||
type ContentEncoder interface {
|
|
||||||
Encode([]byte) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GzipEncoder compresses the buffer using gzip at the default level.
|
|
||||||
type GzipEncoder struct {
|
|
||||||
writer *gzip.Writer
|
|
||||||
buf *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewGzipEncoder() (*GzipEncoder, error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
return &GzipEncoder{
|
|
||||||
writer: gzip.NewWriter(&buf),
|
|
||||||
buf: &buf,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *GzipEncoder) Encode(data []byte) ([]byte, error) {
|
|
||||||
e.buf.Reset()
|
|
||||||
e.writer.Reset(e.buf)
|
|
||||||
|
|
||||||
_, err := e.writer.Write(data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = e.writer.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return e.buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IdentityEncoder is a null encoder that applies no transformation.
|
|
||||||
type IdentityEncoder struct{}
|
|
||||||
|
|
||||||
func NewIdentityEncoder() *IdentityEncoder {
|
|
||||||
return &IdentityEncoder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*IdentityEncoder) Encode(data []byte) ([]byte, error) {
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContentDecoder removes a wrapper encoding from byte buffers.
|
|
||||||
type ContentDecoder interface {
|
|
||||||
Decode([]byte) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GzipDecoder decompresses buffers with gzip compression.
|
|
||||||
type GzipDecoder struct {
|
|
||||||
reader *gzip.Reader
|
|
||||||
buf *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewGzipDecoder() (*GzipDecoder, error) {
|
|
||||||
return &GzipDecoder{
|
|
||||||
reader: new(gzip.Reader),
|
|
||||||
buf: new(bytes.Buffer),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GzipDecoder) Decode(data []byte) ([]byte, error) {
|
|
||||||
d.reader.Reset(bytes.NewBuffer(data))
|
|
||||||
d.buf.Reset()
|
|
||||||
|
|
||||||
_, err := d.buf.ReadFrom(d.reader)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = d.reader.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return d.buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IdentityDecoder is a null decoder that returns the input.
|
|
||||||
type IdentityDecoder struct{}
|
|
||||||
|
|
||||||
func NewIdentityDecoder() *IdentityDecoder {
|
|
||||||
return &IdentityDecoder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*IdentityDecoder) Decode(data []byte) ([]byte, error) {
|
|
||||||
return data, nil
|
|
||||||
}
|
|
|
@ -1,94 +0,0 @@
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGzipEncodeDecode(t *testing.T) {
|
|
||||||
enc, err := NewGzipEncoder()
|
|
||||||
require.NoError(t, err)
|
|
||||||
dec, err := NewGzipDecoder()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
payload, err := enc.Encode([]byte("howdy"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
actual, err := dec.Decode(payload)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "howdy", string(actual))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGzipReuse(t *testing.T) {
|
|
||||||
enc, err := NewGzipEncoder()
|
|
||||||
require.NoError(t, err)
|
|
||||||
dec, err := NewGzipDecoder()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
payload, err := enc.Encode([]byte("howdy"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
actual, err := dec.Decode(payload)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "howdy", string(actual))
|
|
||||||
|
|
||||||
payload, err = enc.Encode([]byte("doody"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
actual, err = dec.Decode(payload)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "doody", string(actual))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIdentityEncodeDecode(t *testing.T) {
|
|
||||||
enc := NewIdentityEncoder()
|
|
||||||
dec := NewIdentityDecoder()
|
|
||||||
|
|
||||||
payload, err := enc.Encode([]byte("howdy"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
actual, err := dec.Decode(payload)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "howdy", string(actual))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStreamIdentityDecode(t *testing.T) {
|
|
||||||
var r bytes.Buffer
|
|
||||||
n, err := r.Write([]byte("howdy"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 5, n)
|
|
||||||
|
|
||||||
dec, err := NewStreamContentDecoder("identity", &r)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(dec)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, []byte("howdy"), data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStreamGzipDecode(t *testing.T) {
|
|
||||||
enc, err := NewGzipEncoder()
|
|
||||||
require.NoError(t, err)
|
|
||||||
written, err := enc.Encode([]byte("howdy"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
w := bytes.NewBuffer(written)
|
|
||||||
|
|
||||||
dec, err := NewStreamContentDecoder("gzip", w)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
b := make([]byte, 10)
|
|
||||||
n, err := dec.Read(b)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 5, n)
|
|
||||||
|
|
||||||
require.Equal(t, []byte("howdy"), b[:n])
|
|
||||||
}
|
|
|
@ -1,36 +0,0 @@
|
||||||
package docker
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// Adapts some of the logic from the actual Docker library's image parsing
|
|
||||||
// routines:
|
|
||||||
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go
|
|
||||||
func ParseImage(image string) (string, string) {
|
|
||||||
domain := ""
|
|
||||||
remainder := ""
|
|
||||||
|
|
||||||
i := strings.IndexRune(image, '/')
|
|
||||||
|
|
||||||
if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") {
|
|
||||||
remainder = image
|
|
||||||
} else {
|
|
||||||
domain, remainder = image[:i], image[i+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
imageName := ""
|
|
||||||
imageVersion := "unknown"
|
|
||||||
|
|
||||||
i = strings.LastIndex(remainder, ":")
|
|
||||||
if i > -1 {
|
|
||||||
imageVersion = remainder[i+1:]
|
|
||||||
imageName = remainder[:i]
|
|
||||||
} else {
|
|
||||||
imageName = remainder
|
|
||||||
}
|
|
||||||
|
|
||||||
if domain != "" {
|
|
||||||
imageName = domain + "/" + imageName
|
|
||||||
}
|
|
||||||
|
|
||||||
return imageName, imageVersion
|
|
||||||
}
|
|
|
@ -1,59 +0,0 @@
|
||||||
package docker_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf/internal/docker"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseImage(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
image string
|
|
||||||
parsedName string
|
|
||||||
parsedVersion string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
image: "postgres",
|
|
||||||
parsedName: "postgres",
|
|
||||||
parsedVersion: "unknown",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "postgres:latest",
|
|
||||||
parsedName: "postgres",
|
|
||||||
parsedVersion: "latest",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "coreos/etcd",
|
|
||||||
parsedName: "coreos/etcd",
|
|
||||||
parsedVersion: "unknown",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "coreos/etcd:latest",
|
|
||||||
parsedName: "coreos/etcd",
|
|
||||||
parsedVersion: "latest",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "quay.io/postgres",
|
|
||||||
parsedName: "quay.io/postgres",
|
|
||||||
parsedVersion: "unknown",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "quay.io:4443/coreos/etcd",
|
|
||||||
parsedName: "quay.io:4443/coreos/etcd",
|
|
||||||
parsedVersion: "unknown",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
image: "quay.io:4443/coreos/etcd:latest",
|
|
||||||
parsedName: "quay.io:4443/coreos/etcd",
|
|
||||||
parsedVersion: "latest",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run("parse name "+tt.image, func(t *testing.T) {
|
|
||||||
imageName, imageVersion := docker.ParseImage(tt.image)
|
|
||||||
require.Equal(t, tt.parsedName, imageName)
|
|
||||||
require.Equal(t, tt.parsedVersion, imageVersion)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,30 +0,0 @@
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"os/exec"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CombinedOutputTimeout runs the given command with the given timeout and
|
|
||||||
// returns the combined output of stdout and stderr.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
|
|
||||||
var b bytes.Buffer
|
|
||||||
c.Stdout = &b
|
|
||||||
c.Stderr = &b
|
|
||||||
if err := c.Start(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err := WaitTimeout(c, timeout)
|
|
||||||
return b.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunTimeout runs the given command with the given timeout.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|
||||||
if err := c.Start(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return WaitTimeout(c, timeout)
|
|
||||||
}
|
|
|
@ -1,58 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os/exec"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KillGrace is the amount of time we allow a process to shutdown before
|
|
||||||
// sending a SIGKILL.
|
|
||||||
const KillGrace = 5 * time.Second
|
|
||||||
|
|
||||||
// WaitTimeout waits for the given command to finish with a timeout.
|
|
||||||
// It assumes the command has already been started.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|
||||||
var kill *time.Timer
|
|
||||||
term := time.AfterFunc(timeout, func() {
|
|
||||||
err := c.Process.Signal(syscall.SIGTERM)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("E! [agent] Error terminating process: %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
kill = time.AfterFunc(KillGrace, func() {
|
|
||||||
err := c.Process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("E! [agent] Error killing process: %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
err := c.Wait()
|
|
||||||
|
|
||||||
// Shutdown all timers
|
|
||||||
if kill != nil {
|
|
||||||
kill.Stop()
|
|
||||||
}
|
|
||||||
termSent := !term.Stop()
|
|
||||||
|
|
||||||
// If the process exited without error treat it as success. This allows a
|
|
||||||
// process to do a clean shutdown on signal.
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If SIGTERM was sent then treat any process error as a timeout.
|
|
||||||
if termSent {
|
|
||||||
return TimeoutErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise there was an error unrelated to termination.
|
|
||||||
return err
|
|
||||||
}
|
|
|
@ -1,41 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os/exec"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WaitTimeout waits for the given command to finish with a timeout.
|
|
||||||
// It assumes the command has already been started.
|
|
||||||
// If the command times out, it attempts to kill the process.
|
|
||||||
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
|
||||||
timer := time.AfterFunc(timeout, func() {
|
|
||||||
err := c.Process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("E! [agent] Error killing process: %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
err := c.Wait()
|
|
||||||
|
|
||||||
// Shutdown all timers
|
|
||||||
termSent := !timer.Stop()
|
|
||||||
|
|
||||||
// If the process exited without error treat it as success. This allows a
|
|
||||||
// process to do a clean shutdown on signal.
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If SIGTERM was sent then treat any process error as a timeout.
|
|
||||||
if termSent {
|
|
||||||
return TimeoutErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise there was an error unrelated to termination.
|
|
||||||
return err
|
|
||||||
}
|
|
|
@ -1,114 +1,108 @@
|
||||||
package globpath
|
package globpath
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
"github.com/gobwas/glob"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var sepStr = fmt.Sprintf("%v", string(os.PathSeparator))
|
||||||
|
|
||||||
type GlobPath struct {
|
type GlobPath struct {
|
||||||
path string
|
path string
|
||||||
hasMeta bool
|
hasMeta bool
|
||||||
HasSuperMeta bool
|
hasSuperMeta bool
|
||||||
rootGlob string
|
|
||||||
g glob.Glob
|
g glob.Glob
|
||||||
|
root string
|
||||||
}
|
}
|
||||||
|
|
||||||
func Compile(path string) (*GlobPath, error) {
|
func Compile(path string) (*GlobPath, error) {
|
||||||
out := GlobPath{
|
out := GlobPath{
|
||||||
hasMeta: hasMeta(path),
|
hasMeta: hasMeta(path),
|
||||||
HasSuperMeta: hasSuperMeta(path),
|
hasSuperMeta: hasSuperMeta(path),
|
||||||
path: filepath.FromSlash(path),
|
path: path,
|
||||||
}
|
}
|
||||||
|
|
||||||
// if there are no glob meta characters in the path, don't bother compiling
|
// if there are no glob meta characters in the path, don't bother compiling
|
||||||
// a glob object
|
// a glob object or finding the root directory. (see short-circuit in Match)
|
||||||
if !out.hasMeta || !out.HasSuperMeta {
|
if !out.hasMeta || !out.hasSuperMeta {
|
||||||
return &out, nil
|
return &out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// find the root elements of the object path, the entry point for recursion
|
|
||||||
// when you have a super-meta in your path (which are :
|
|
||||||
// glob(/your/expression/until/first/star/of/super-meta))
|
|
||||||
out.rootGlob = path[:strings.Index(path, "**")+1]
|
|
||||||
var err error
|
var err error
|
||||||
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
|
if out.g, err = glob.Compile(path, os.PathSeparator); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
// Get the root directory for this filepath
|
||||||
|
out.root = findRootDir(path)
|
||||||
return &out, nil
|
return &out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Match returns all files matching the expression.
|
func (g *GlobPath) Match() map[string]os.FileInfo {
|
||||||
// If it's a static path, returns path.
|
|
||||||
// All returned path will have the host platform separator.
|
|
||||||
func (g *GlobPath) Match() []string {
|
|
||||||
if !g.hasMeta {
|
if !g.hasMeta {
|
||||||
return []string{g.path}
|
out := make(map[string]os.FileInfo)
|
||||||
}
|
info, err := os.Stat(g.path)
|
||||||
if !g.HasSuperMeta {
|
if err == nil {
|
||||||
files, _ := filepath.Glob(g.path)
|
out[g.path] = info
|
||||||
return files
|
|
||||||
}
|
|
||||||
roots, err := filepath.Glob(g.rootGlob)
|
|
||||||
if err != nil {
|
|
||||||
return []string{}
|
|
||||||
}
|
|
||||||
out := []string{}
|
|
||||||
walkfn := func(path string, _ *godirwalk.Dirent) error {
|
|
||||||
if g.g.Match(path) {
|
|
||||||
out = append(out, path)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
for _, root := range roots {
|
|
||||||
fileinfo, err := os.Stat(root)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !fileinfo.IsDir() {
|
|
||||||
if g.MatchString(root) {
|
|
||||||
out = append(out, root)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
godirwalk.Walk(root, &godirwalk.Options{
|
|
||||||
Callback: walkfn,
|
|
||||||
Unsorted: true,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
|
}
|
||||||
|
if !g.hasSuperMeta {
|
||||||
|
out := make(map[string]os.FileInfo)
|
||||||
|
files, _ := filepath.Glob(g.path)
|
||||||
|
for _, file := range files {
|
||||||
|
info, err := os.Stat(file)
|
||||||
|
if err == nil {
|
||||||
|
out[file] = info
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
return walkFilePath(g.root, g.g)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MatchString tests the path string against the glob. The path should contain
|
// walk the filepath from the given root and return a list of files that match
|
||||||
// the host platform separator.
|
// the given glob.
|
||||||
func (g *GlobPath) MatchString(path string) bool {
|
func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo {
|
||||||
if !g.HasSuperMeta {
|
matchedFiles := make(map[string]os.FileInfo)
|
||||||
res, _ := filepath.Match(g.path, path)
|
walkfn := func(path string, info os.FileInfo, _ error) error {
|
||||||
return res
|
if g.Match(path) {
|
||||||
|
matchedFiles[path] = info
|
||||||
}
|
}
|
||||||
return g.g.Match(path)
|
return nil
|
||||||
|
}
|
||||||
|
filepath.Walk(root, walkfn)
|
||||||
|
return matchedFiles
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetRoots returns a list of files and directories which should be optimal
|
// find the root dir of the given path (could include globs).
|
||||||
// prefixes of matching files when you have a super-meta in your expression :
|
// ie:
|
||||||
// - any directory under these roots may contain a matching file
|
// /var/log/telegraf.conf -> /var/log
|
||||||
// - no file outside of these roots can match the pattern
|
// /home/** -> /home
|
||||||
// Note that it returns both files and directories.
|
// /home/*/** -> /home
|
||||||
// All returned path will have the host platform separator.
|
// /lib/share/*/*/**.txt -> /lib/share
|
||||||
func (g *GlobPath) GetRoots() []string {
|
func findRootDir(path string) string {
|
||||||
if !g.hasMeta {
|
pathItems := strings.Split(path, sepStr)
|
||||||
return []string{g.path}
|
out := sepStr
|
||||||
|
for i, item := range pathItems {
|
||||||
|
if i == len(pathItems)-1 {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
if !g.HasSuperMeta {
|
if item == "" {
|
||||||
matches, _ := filepath.Glob(g.path)
|
continue
|
||||||
return matches
|
|
||||||
}
|
}
|
||||||
roots, _ := filepath.Glob(g.rootGlob)
|
if hasMeta(item) {
|
||||||
return roots
|
break
|
||||||
|
}
|
||||||
|
out += item + sepStr
|
||||||
|
}
|
||||||
|
if out != "/" {
|
||||||
|
out = strings.TrimSuffix(out, "/")
|
||||||
|
}
|
||||||
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// hasMeta reports whether path contains any magic glob characters.
|
// hasMeta reports whether path contains any magic glob characters.
|
||||||
|
|
|
@ -1,10 +1,12 @@
|
||||||
package globpath
|
package globpath
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -27,32 +29,31 @@ func TestCompileAndMatch(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
matches := g1.Match()
|
matches := g1.Match()
|
||||||
require.Len(t, matches, 6)
|
assert.Len(t, matches, 6)
|
||||||
matches = g2.Match()
|
matches = g2.Match()
|
||||||
require.Len(t, matches, 2)
|
assert.Len(t, matches, 2)
|
||||||
matches = g3.Match()
|
matches = g3.Match()
|
||||||
require.Len(t, matches, 1)
|
assert.Len(t, matches, 1)
|
||||||
matches = g4.Match()
|
matches = g4.Match()
|
||||||
require.Len(t, matches, 1)
|
assert.Len(t, matches, 0)
|
||||||
matches = g5.Match()
|
matches = g5.Match()
|
||||||
require.Len(t, matches, 0)
|
assert.Len(t, matches, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRootGlob(t *testing.T) {
|
func TestFindRootDir(t *testing.T) {
|
||||||
dir := getTestdataDir()
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input string
|
input string
|
||||||
output string
|
output string
|
||||||
}{
|
}{
|
||||||
{dir + "/**", dir + "/*"},
|
{"/var/log/telegraf.conf", "/var/log"},
|
||||||
{dir + "/nested?/**", dir + "/nested?/*"},
|
{"/home/**", "/home"},
|
||||||
{dir + "/ne**/nest*", dir + "/ne*"},
|
{"/home/*/**", "/home"},
|
||||||
{dir + "/nested?/*", ""},
|
{"/lib/share/*/*/**.txt", "/lib/share"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
actual, _ := Compile(test.input)
|
actual := findRootDir(test.input)
|
||||||
require.Equal(t, actual.rootGlob, test.output)
|
assert.Equal(t, test.output, actual)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -63,7 +64,7 @@ func TestFindNestedTextFile(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
matches := g1.Match()
|
matches := g1.Match()
|
||||||
require.Len(t, matches, 1)
|
assert.Len(t, matches, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTestdataDir() string {
|
func getTestdataDir() string {
|
||||||
|
@ -74,10 +75,10 @@ func getTestdataDir() string {
|
||||||
func TestMatch_ErrPermission(t *testing.T) {
|
func TestMatch_ErrPermission(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input string
|
input string
|
||||||
expected []string
|
expected map[string]os.FileInfo
|
||||||
}{
|
}{
|
||||||
{"/root/foo", []string{"/root/foo"}},
|
{"/root/foo", map[string]os.FileInfo{}},
|
||||||
{"/root/f*", []string(nil)},
|
{"/root/f*", map[string]os.FileInfo{}},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
|
@ -87,14 +88,3 @@ func TestMatch_ErrPermission(t *testing.T) {
|
||||||
require.Equal(t, test.expected, actual)
|
require.Equal(t, test.expected, actual)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestWindowsSeparator(t *testing.T) {
|
|
||||||
if runtime.GOOS != "windows" {
|
|
||||||
t.Skip("Skipping Windows only test")
|
|
||||||
}
|
|
||||||
|
|
||||||
glob, err := Compile("testdata/nested1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
ok := glob.MatchString("testdata\\nested1")
|
|
||||||
require.True(t, ok)
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,9 +0,0 @@
|
||||||
// +build !goplugin
|
|
||||||
|
|
||||||
package goplugin
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
func LoadExternalPlugins(rootDir string) error {
|
|
||||||
return errors.New("go plugin support is not enabled")
|
|
||||||
}
|
|
|
@ -1,42 +0,0 @@
|
||||||
// +build goplugin
|
|
||||||
|
|
||||||
package goplugin
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"plugin"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
|
|
||||||
// in the specified directory.
|
|
||||||
func LoadExternalPlugins(rootDir string) error {
|
|
||||||
return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error {
|
|
||||||
// Stop if there was an error.
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore directories.
|
|
||||||
if info.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore files that aren't shared libraries.
|
|
||||||
ext := strings.ToLower(path.Ext(pth))
|
|
||||||
if ext != ".so" && ext != ".dll" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load plugin.
|
|
||||||
_, err = plugin.Open(pth)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error loading %s: %s", pth, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
108
internal/http.go
108
internal/http.go
|
@ -1,108 +0,0 @@
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/subtle"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BasicAuthErrorFunc func(rw http.ResponseWriter)
|
|
||||||
|
|
||||||
// AuthHandler returns a http handler that requires HTTP basic auth
|
|
||||||
// credentials to match the given username and password.
|
|
||||||
func AuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler {
|
|
||||||
return func(h http.Handler) http.Handler {
|
|
||||||
return &basicAuthHandler{
|
|
||||||
username: username,
|
|
||||||
password: password,
|
|
||||||
realm: realm,
|
|
||||||
onError: onError,
|
|
||||||
next: h,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type basicAuthHandler struct {
|
|
||||||
username string
|
|
||||||
password string
|
|
||||||
realm string
|
|
||||||
onError BasicAuthErrorFunc
|
|
||||||
next http.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
|
|
||||||
if h.username != "" || h.password != "" {
|
|
||||||
reqUsername, reqPassword, ok := req.BasicAuth()
|
|
||||||
if !ok ||
|
|
||||||
subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 ||
|
|
||||||
subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 {
|
|
||||||
|
|
||||||
rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"")
|
|
||||||
h.onError(rw)
|
|
||||||
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.next.ServeHTTP(rw, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorFunc is a callback for writing an error response.
|
|
||||||
type ErrorFunc func(rw http.ResponseWriter, code int)
|
|
||||||
|
|
||||||
// IPRangeHandler returns a http handler that requires the remote address to be
|
|
||||||
// in the specified network.
|
|
||||||
func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler {
|
|
||||||
return func(h http.Handler) http.Handler {
|
|
||||||
return &ipRangeHandler{
|
|
||||||
network: network,
|
|
||||||
onError: onError,
|
|
||||||
next: h,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ipRangeHandler struct {
|
|
||||||
network []*net.IPNet
|
|
||||||
onError ErrorFunc
|
|
||||||
next http.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
|
|
||||||
if len(h.network) == 0 {
|
|
||||||
h.next.ServeHTTP(rw, req)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
h.onError(rw, http.StatusForbidden)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
remoteIP := net.ParseIP(remoteIPString)
|
|
||||||
if remoteIP == nil {
|
|
||||||
h.onError(rw, http.StatusForbidden)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, net := range h.network {
|
|
||||||
if net.Contains(remoteIP) {
|
|
||||||
h.next.ServeHTTP(rw, req)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.onError(rw, http.StatusForbidden)
|
|
||||||
}
|
|
||||||
|
|
||||||
func OnClientError(client *http.Client, err error) {
|
|
||||||
// Close connection after a timeout error. If this is a HTTP2
|
|
||||||
// connection this ensures that next interval a new connection will be
|
|
||||||
// used and name lookup will be performed.
|
|
||||||
// https://github.com/golang/go/issues/36026
|
|
||||||
if err, ok := err.(*url.Error); ok && err.Timeout() {
|
|
||||||
client.CloseIdleConnections()
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -3,24 +3,20 @@ package internal
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"crypto/rand"
|
||||||
"context"
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"math"
|
"log"
|
||||||
"math/rand"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"runtime"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/alecthomas/units"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||||
|
@ -29,52 +25,13 @@ var (
|
||||||
TimeoutErr = errors.New("Command timed out.")
|
TimeoutErr = errors.New("Command timed out.")
|
||||||
|
|
||||||
NotImplementedError = errors.New("not implemented yet")
|
NotImplementedError = errors.New("not implemented yet")
|
||||||
|
|
||||||
VersionAlreadySetError = errors.New("version has already been set")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Set via the main module
|
|
||||||
var version string
|
|
||||||
|
|
||||||
// Duration just wraps time.Duration
|
// Duration just wraps time.Duration
|
||||||
type Duration struct {
|
type Duration struct {
|
||||||
Duration time.Duration
|
Duration time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size just wraps an int64
|
|
||||||
type Size struct {
|
|
||||||
Size int64
|
|
||||||
}
|
|
||||||
|
|
||||||
type Number struct {
|
|
||||||
Value float64
|
|
||||||
}
|
|
||||||
|
|
||||||
type ReadWaitCloser struct {
|
|
||||||
pipeReader *io.PipeReader
|
|
||||||
wg sync.WaitGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetVersion sets the telegraf agent version
|
|
||||||
func SetVersion(v string) error {
|
|
||||||
if version != "" {
|
|
||||||
return VersionAlreadySetError
|
|
||||||
}
|
|
||||||
version = v
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Version returns the telegraf agent version
|
|
||||||
func Version() string {
|
|
||||||
return version
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProductToken returns a tag for Telegraf that can be used in user agents.
|
|
||||||
func ProductToken() string {
|
|
||||||
return fmt.Sprintf("Telegraf/%s Go/%s",
|
|
||||||
Version(), strings.TrimPrefix(runtime.Version(), "go"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalTOML parses the duration from the TOML config file
|
// UnmarshalTOML parses the duration from the TOML config file
|
||||||
func (d *Duration) UnmarshalTOML(b []byte) error {
|
func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||||
var err error
|
var err error
|
||||||
|
@ -110,37 +67,6 @@ func (d *Duration) UnmarshalTOML(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Size) UnmarshalTOML(b []byte) error {
|
|
||||||
var err error
|
|
||||||
b = bytes.Trim(b, `'`)
|
|
||||||
|
|
||||||
val, err := strconv.ParseInt(string(b), 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
s.Size = val
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
uq, err := strconv.Unquote(string(b))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
val, err = units.ParseStrictBytes(uq)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.Size = val
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *Number) UnmarshalTOML(b []byte) error {
|
|
||||||
value, err := strconv.ParseFloat(string(b), 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
n.Value = value
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadLines reads contents from a file and splits them by new lines.
|
// ReadLines reads contents from a file and splits them by new lines.
|
||||||
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
|
||||||
func ReadLines(filename string) ([]string, error) {
|
func ReadLines(filename string) ([]string, error) {
|
||||||
|
@ -186,6 +112,49 @@ func RandomString(n int) string {
|
||||||
return string(bytes)
|
return string(bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
|
||||||
|
// you must give the full path to the files.
|
||||||
|
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
|
||||||
|
func GetTLSConfig(
|
||||||
|
SSLCert, SSLKey, SSLCA string,
|
||||||
|
InsecureSkipVerify bool,
|
||||||
|
) (*tls.Config, error) {
|
||||||
|
if SSLCert == "" && SSLKey == "" && SSLCA == "" && !InsecureSkipVerify {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t := &tls.Config{
|
||||||
|
InsecureSkipVerify: InsecureSkipVerify,
|
||||||
|
}
|
||||||
|
|
||||||
|
if SSLCA != "" {
|
||||||
|
caCert, err := ioutil.ReadFile(SSLCA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
|
||||||
|
err))
|
||||||
|
}
|
||||||
|
|
||||||
|
caCertPool := x509.NewCertPool()
|
||||||
|
caCertPool.AppendCertsFromPEM(caCert)
|
||||||
|
t.RootCAs = caCertPool
|
||||||
|
}
|
||||||
|
|
||||||
|
if SSLCert != "" && SSLKey != "" {
|
||||||
|
cert, err := tls.LoadX509KeyPair(SSLCert, SSLKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.New(fmt.Sprintf(
|
||||||
|
"Could not load TLS client key/certificate from %s:%s: %s",
|
||||||
|
SSLKey, SSLCert, err))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Certificates = []tls.Certificate{cert}
|
||||||
|
t.BuildNameToCertificate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// will be nil by default if nothing is provided
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
// SnakeCase converts the given string to snake case following the Golang format:
|
// SnakeCase converts the given string to snake case following the Golang format:
|
||||||
// acronyms are converted to lower-case and preceded by an underscore.
|
// acronyms are converted to lower-case and preceded by an underscore.
|
||||||
func SnakeCase(in string) string {
|
func SnakeCase(in string) string {
|
||||||
|
@ -203,6 +172,51 @@ func SnakeCase(in string) string {
|
||||||
return string(out)
|
return string(out)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CombinedOutputTimeout runs the given command with the given timeout and
|
||||||
|
// returns the combined output of stdout and stderr.
|
||||||
|
// If the command times out, it attempts to kill the process.
|
||||||
|
func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) {
|
||||||
|
var b bytes.Buffer
|
||||||
|
c.Stdout = &b
|
||||||
|
c.Stderr = &b
|
||||||
|
if err := c.Start(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err := WaitTimeout(c, timeout)
|
||||||
|
return b.Bytes(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunTimeout runs the given command with the given timeout.
|
||||||
|
// If the command times out, it attempts to kill the process.
|
||||||
|
func RunTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||||
|
if err := c.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return WaitTimeout(c, timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitTimeout waits for the given command to finish with a timeout.
|
||||||
|
// It assumes the command has already been started.
|
||||||
|
// If the command times out, it attempts to kill the process.
|
||||||
|
func WaitTimeout(c *exec.Cmd, timeout time.Duration) error {
|
||||||
|
timer := time.NewTimer(timeout)
|
||||||
|
done := make(chan error)
|
||||||
|
go func() { done <- c.Wait() }()
|
||||||
|
select {
|
||||||
|
case err := <-done:
|
||||||
|
timer.Stop()
|
||||||
|
return err
|
||||||
|
case <-timer.C:
|
||||||
|
if err := c.Process.Kill(); err != nil {
|
||||||
|
log.Printf("E! FATAL error killing process: %s", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// wait for the command to return after killing it
|
||||||
|
<-done
|
||||||
|
return TimeoutErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// RandomSleep will sleep for a random amount of time up to max.
|
// RandomSleep will sleep for a random amount of time up to max.
|
||||||
// If the shutdown channel is closed, it will return before it has finished
|
// If the shutdown channel is closed, it will return before it has finished
|
||||||
// sleeping.
|
// sleeping.
|
||||||
|
@ -210,8 +224,12 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
||||||
if max == 0 {
|
if max == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
maxSleep := big.NewInt(max.Nanoseconds())
|
||||||
|
|
||||||
sleepns := rand.Int63n(max.Nanoseconds())
|
var sleepns int64
|
||||||
|
if j, err := rand.Int(rand.Reader, maxSleep); err == nil {
|
||||||
|
sleepns = j.Int64()
|
||||||
|
}
|
||||||
|
|
||||||
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
|
t := time.NewTimer(time.Nanosecond * time.Duration(sleepns))
|
||||||
select {
|
select {
|
||||||
|
@ -222,203 +240,3 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// RandomDuration returns a random duration between 0 and max.
|
|
||||||
func RandomDuration(max time.Duration) time.Duration {
|
|
||||||
if max == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
sleepns := rand.Int63n(max.Nanoseconds())
|
|
||||||
|
|
||||||
return time.Duration(sleepns)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SleepContext sleeps until the context is closed or the duration is reached.
|
|
||||||
func SleepContext(ctx context.Context, duration time.Duration) error {
|
|
||||||
if duration == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
t := time.NewTimer(duration)
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
t.Stop()
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AlignDuration returns the duration until next aligned interval.
|
|
||||||
// If the current time is aligned a 0 duration is returned.
|
|
||||||
func AlignDuration(tm time.Time, interval time.Duration) time.Duration {
|
|
||||||
return AlignTime(tm, interval).Sub(tm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AlignTime returns the time of the next aligned interval.
|
|
||||||
// If the current time is aligned the current time is returned.
|
|
||||||
func AlignTime(tm time.Time, interval time.Duration) time.Time {
|
|
||||||
truncated := tm.Truncate(interval)
|
|
||||||
if truncated == tm {
|
|
||||||
return tm
|
|
||||||
}
|
|
||||||
return truncated.Add(interval)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exit status takes the error from exec.Command
|
|
||||||
// and returns the exit status and true
|
|
||||||
// if error is not exit status, will return 0 and false
|
|
||||||
func ExitStatus(err error) (int, bool) {
|
|
||||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
|
||||||
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
|
||||||
return status.ExitStatus(), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *ReadWaitCloser) Close() error {
|
|
||||||
err := r.pipeReader.Close()
|
|
||||||
r.wg.Wait() // wait for the gzip goroutine finish
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompressWithGzip takes an io.Reader as input and pipes
|
|
||||||
// it through a gzip.Writer returning an io.Reader containing
|
|
||||||
// the gzipped data.
|
|
||||||
// An error is returned if passing data to the gzip.Writer fails
|
|
||||||
func CompressWithGzip(data io.Reader) (io.ReadCloser, error) {
|
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
|
||||||
gzipWriter := gzip.NewWriter(pipeWriter)
|
|
||||||
|
|
||||||
rc := &ReadWaitCloser{
|
|
||||||
pipeReader: pipeReader,
|
|
||||||
}
|
|
||||||
|
|
||||||
rc.wg.Add(1)
|
|
||||||
var err error
|
|
||||||
go func() {
|
|
||||||
_, err = io.Copy(gzipWriter, data)
|
|
||||||
gzipWriter.Close()
|
|
||||||
// subsequent reads from the read half of the pipe will
|
|
||||||
// return no bytes and the error err, or EOF if err is nil.
|
|
||||||
pipeWriter.CloseWithError(err)
|
|
||||||
rc.wg.Done()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return pipeReader, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTimestamp parses a Time according to the standard Telegraf options.
|
|
||||||
// These are generally displayed in the toml similar to:
|
|
||||||
// json_time_key= "timestamp"
|
|
||||||
// json_time_format = "2006-01-02T15:04:05Z07:00"
|
|
||||||
// json_timezone = "America/Los_Angeles"
|
|
||||||
//
|
|
||||||
// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go
|
|
||||||
// time layout suitable for time.Parse.
|
|
||||||
//
|
|
||||||
// When using the "unix" format, a optional fractional component is allowed.
|
|
||||||
// Specific unix time precisions cannot have a fractional component.
|
|
||||||
//
|
|
||||||
// Unix times may be an int64, float64, or string. When using a Go format
|
|
||||||
// string the timestamp must be a string.
|
|
||||||
//
|
|
||||||
// The location is a location string suitable for time.LoadLocation. Unix
|
|
||||||
// times do not use the location string, a unix time is always return in the
|
|
||||||
// UTC location.
|
|
||||||
func ParseTimestamp(format string, timestamp interface{}, location string) (time.Time, error) {
|
|
||||||
switch format {
|
|
||||||
case "unix", "unix_ms", "unix_us", "unix_ns":
|
|
||||||
return parseUnix(format, timestamp)
|
|
||||||
default:
|
|
||||||
if location == "" {
|
|
||||||
location = "UTC"
|
|
||||||
}
|
|
||||||
return parseTime(format, timestamp, location)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseUnix(format string, timestamp interface{}) (time.Time, error) {
|
|
||||||
integer, fractional, err := parseComponents(timestamp)
|
|
||||||
if err != nil {
|
|
||||||
return time.Unix(0, 0), err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch strings.ToLower(format) {
|
|
||||||
case "unix":
|
|
||||||
return time.Unix(integer, fractional).UTC(), nil
|
|
||||||
case "unix_ms":
|
|
||||||
return time.Unix(0, integer*1e6).UTC(), nil
|
|
||||||
case "unix_us":
|
|
||||||
return time.Unix(0, integer*1e3).UTC(), nil
|
|
||||||
case "unix_ns":
|
|
||||||
return time.Unix(0, integer).UTC(), nil
|
|
||||||
default:
|
|
||||||
return time.Unix(0, 0), errors.New("unsupported type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the integers before and after an optional decimal point. Both '.'
|
|
||||||
// and ',' are supported for the decimal point. The timestamp can be an int64,
|
|
||||||
// float64, or string.
|
|
||||||
// ex: "42.5" -> (42, 5, nil)
|
|
||||||
func parseComponents(timestamp interface{}) (int64, int64, error) {
|
|
||||||
switch ts := timestamp.(type) {
|
|
||||||
case string:
|
|
||||||
parts := strings.SplitN(ts, ".", 2)
|
|
||||||
if len(parts) == 2 {
|
|
||||||
return parseUnixTimeComponents(parts[0], parts[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
parts = strings.SplitN(ts, ",", 2)
|
|
||||||
if len(parts) == 2 {
|
|
||||||
return parseUnixTimeComponents(parts[0], parts[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
integer, err := strconv.ParseInt(ts, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, err
|
|
||||||
}
|
|
||||||
return integer, 0, nil
|
|
||||||
case int64:
|
|
||||||
return ts, 0, nil
|
|
||||||
case float64:
|
|
||||||
integer, fractional := math.Modf(ts)
|
|
||||||
return int64(integer), int64(fractional * 1e9), nil
|
|
||||||
default:
|
|
||||||
return 0, 0, errors.New("unsupported type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseUnixTimeComponents(first, second string) (int64, int64, error) {
|
|
||||||
integer, err := strconv.ParseInt(first, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to nanoseconds, dropping any greater precision.
|
|
||||||
buf := []byte("000000000")
|
|
||||||
copy(buf, second)
|
|
||||||
|
|
||||||
fractional, err := strconv.ParseInt(string(buf), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, err
|
|
||||||
}
|
|
||||||
return integer, fractional, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTime parses a string timestamp according to the format string.
|
|
||||||
func parseTime(format string, timestamp interface{}, location string) (time.Time, error) {
|
|
||||||
switch ts := timestamp.(type) {
|
|
||||||
case string:
|
|
||||||
loc, err := time.LoadLocation(location)
|
|
||||||
if err != nil {
|
|
||||||
return time.Unix(0, 0), err
|
|
||||||
}
|
|
||||||
return time.ParseInLocation(format, ts, loc)
|
|
||||||
default:
|
|
||||||
return time.Unix(0, 0), errors.New("unsupported type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,19 +1,11 @@
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"crypto/rand"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type SnakeTest struct {
|
type SnakeTest struct {
|
||||||
|
@ -68,30 +60,6 @@ func TestRunTimeout(t *testing.T) {
|
||||||
assert.True(t, elapsed < time.Millisecond*75)
|
assert.True(t, elapsed < time.Millisecond*75)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verifies behavior of a command that doesn't get killed.
|
|
||||||
func TestRunTimeoutFastExit(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping test due to random failures.")
|
|
||||||
}
|
|
||||||
if echobin == "" {
|
|
||||||
t.Skip("'echo' binary not available on OS, skipping.")
|
|
||||||
}
|
|
||||||
cmd := exec.Command(echobin)
|
|
||||||
start := time.Now()
|
|
||||||
err := RunTimeout(cmd, time.Millisecond*20)
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
log.SetOutput(buf)
|
|
||||||
elapsed := time.Since(start)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Verify that command gets killed in 20ms, with some breathing room
|
|
||||||
assert.True(t, elapsed < time.Millisecond*75)
|
|
||||||
|
|
||||||
// Verify "process already finished" log doesn't occur.
|
|
||||||
time.Sleep(time.Millisecond * 75)
|
|
||||||
require.Equal(t, "", buf.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCombinedOutputTimeout(t *testing.T) {
|
func TestCombinedOutputTimeout(t *testing.T) {
|
||||||
// TODO: Fix this test
|
// TODO: Fix this test
|
||||||
t.Skip("Test failing too often, skip for now and revisit later.")
|
t.Skip("Test failing too often, skip for now and revisit later.")
|
||||||
|
@ -194,298 +162,3 @@ func TestDuration(t *testing.T) {
|
||||||
d.UnmarshalTOML([]byte(`1.5`))
|
d.UnmarshalTOML([]byte(`1.5`))
|
||||||
assert.Equal(t, time.Second, d.Duration)
|
assert.Equal(t, time.Second, d.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSize(t *testing.T) {
|
|
||||||
var s Size
|
|
||||||
|
|
||||||
s.UnmarshalTOML([]byte(`"1B"`))
|
|
||||||
assert.Equal(t, int64(1), s.Size)
|
|
||||||
|
|
||||||
s = Size{}
|
|
||||||
s.UnmarshalTOML([]byte(`1`))
|
|
||||||
assert.Equal(t, int64(1), s.Size)
|
|
||||||
|
|
||||||
s = Size{}
|
|
||||||
s.UnmarshalTOML([]byte(`'1'`))
|
|
||||||
assert.Equal(t, int64(1), s.Size)
|
|
||||||
|
|
||||||
s = Size{}
|
|
||||||
s.UnmarshalTOML([]byte(`"1GB"`))
|
|
||||||
assert.Equal(t, int64(1000*1000*1000), s.Size)
|
|
||||||
|
|
||||||
s = Size{}
|
|
||||||
s.UnmarshalTOML([]byte(`"12GiB"`))
|
|
||||||
assert.Equal(t, int64(12*1024*1024*1024), s.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCompressWithGzip(t *testing.T) {
|
|
||||||
testData := "the quick brown fox jumps over the lazy dog"
|
|
||||||
inputBuffer := bytes.NewBuffer([]byte(testData))
|
|
||||||
|
|
||||||
outputBuffer, err := CompressWithGzip(inputBuffer)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
gzipReader, err := gzip.NewReader(outputBuffer)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
defer gzipReader.Close()
|
|
||||||
|
|
||||||
output, err := ioutil.ReadAll(gzipReader)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, testData, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockReader struct {
|
|
||||||
readN uint64 // record the number of calls to Read
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *mockReader) Read(p []byte) (n int, err error) {
|
|
||||||
r.readN++
|
|
||||||
return rand.Read(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCompressWithGzipEarlyClose(t *testing.T) {
|
|
||||||
mr := &mockReader{}
|
|
||||||
|
|
||||||
rc, err := CompressWithGzip(mr)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
n, err := io.CopyN(ioutil.Discard, rc, 10000)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(10000), n)
|
|
||||||
|
|
||||||
r1 := mr.readN
|
|
||||||
err = rc.Close()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
n, err = io.CopyN(ioutil.Discard, rc, 10000)
|
|
||||||
assert.Error(t, io.EOF, err)
|
|
||||||
assert.Equal(t, int64(0), n)
|
|
||||||
|
|
||||||
r2 := mr.readN
|
|
||||||
// no more read to the source after closing
|
|
||||||
assert.Equal(t, r1, r2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVersionAlreadySet(t *testing.T) {
|
|
||||||
err := SetVersion("foo")
|
|
||||||
assert.Nil(t, err)
|
|
||||||
|
|
||||||
err = SetVersion("bar")
|
|
||||||
|
|
||||||
assert.NotNil(t, err)
|
|
||||||
assert.IsType(t, VersionAlreadySetError, err)
|
|
||||||
|
|
||||||
assert.Equal(t, "foo", Version())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlignDuration(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
now time.Time
|
|
||||||
interval time.Duration
|
|
||||||
expected time.Duration
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "aligned",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
|
|
||||||
interval: 10 * time.Second,
|
|
||||||
expected: 0 * time.Second,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "standard interval",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
|
|
||||||
interval: 10 * time.Second,
|
|
||||||
expected: 9 * time.Second,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "odd interval",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC),
|
|
||||||
interval: 3 * time.Second,
|
|
||||||
expected: 2 * time.Second,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "sub second interval",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 1, 0, 5e8, time.UTC),
|
|
||||||
interval: 1 * time.Second,
|
|
||||||
expected: 500 * time.Millisecond,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "non divisible not aligned on minutes",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 0, 0, 0, time.UTC),
|
|
||||||
interval: 1*time.Second + 100*time.Millisecond,
|
|
||||||
expected: 400 * time.Millisecond,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "long interval",
|
|
||||||
now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC),
|
|
||||||
interval: 1 * time.Hour,
|
|
||||||
expected: 59 * time.Minute,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
actual := AlignDuration(tt.now, tt.interval)
|
|
||||||
require.Equal(t, tt.expected, actual)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlignTime(t *testing.T) {
|
|
||||||
rfc3339 := func(value string) time.Time {
|
|
||||||
t, _ := time.Parse(time.RFC3339, value)
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
now time.Time
|
|
||||||
interval time.Duration
|
|
||||||
expected time.Time
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "aligned",
|
|
||||||
now: rfc3339("2018-01-01T01:01:00Z"),
|
|
||||||
interval: 10 * time.Second,
|
|
||||||
expected: rfc3339("2018-01-01T01:01:00Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "aligned",
|
|
||||||
now: rfc3339("2018-01-01T01:01:01Z"),
|
|
||||||
interval: 10 * time.Second,
|
|
||||||
expected: rfc3339("2018-01-01T01:01:10Z"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
actual := AlignTime(tt.now, tt.interval)
|
|
||||||
require.Equal(t, tt.expected, actual)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParseTimestamp(t *testing.T) {
|
|
||||||
rfc3339 := func(value string) time.Time {
|
|
||||||
tm, err := time.Parse(time.RFC3339Nano, value)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return tm
|
|
||||||
}
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
format string
|
|
||||||
timestamp interface{}
|
|
||||||
location string
|
|
||||||
expected time.Time
|
|
||||||
err bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "parse layout string in utc",
|
|
||||||
format: "2006-01-02 15:04:05",
|
|
||||||
timestamp: "2019-02-20 21:50:34",
|
|
||||||
location: "UTC",
|
|
||||||
expected: rfc3339("2019-02-20T21:50:34Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "parse layout string with invalid timezone",
|
|
||||||
format: "2006-01-02 15:04:05",
|
|
||||||
timestamp: "2019-02-20 21:50:34",
|
|
||||||
location: "InvalidTimeZone",
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "layout regression 6386",
|
|
||||||
format: "02.01.2006 15:04:05",
|
|
||||||
timestamp: "09.07.2019 00:11:00",
|
|
||||||
expected: rfc3339("2019-07-09T00:11:00Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "default location is utc",
|
|
||||||
format: "2006-01-02 15:04:05",
|
|
||||||
timestamp: "2019-02-20 21:50:34",
|
|
||||||
expected: rfc3339("2019-02-20T21:50:34Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds without fractional",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: "1568338208",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds with fractional",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: "1568338208.500",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds with fractional and comma decimal point",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: "1568338208,500",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds extra precision",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: "1568338208.00000050042",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds integer",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: int64(1568338208),
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix seconds float",
|
|
||||||
format: "unix",
|
|
||||||
timestamp: float64(1568338208.500),
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix milliseconds",
|
|
||||||
format: "unix_ms",
|
|
||||||
timestamp: "1568338208500",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix milliseconds with fractional is ignored",
|
|
||||||
format: "unix_ms",
|
|
||||||
timestamp: "1568338208500.42",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix microseconds",
|
|
||||||
format: "unix_us",
|
|
||||||
timestamp: "1568338208000500",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.000500Z"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unix nanoseconds",
|
|
||||||
format: "unix_ns",
|
|
||||||
timestamp: "1568338208000000500",
|
|
||||||
expected: rfc3339("2019-09-13T01:30:08.000000500Z"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
tm, err := ParseTimestamp(tt.format, tt.timestamp, tt.location)
|
|
||||||
if tt.err {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, tt.expected, tm)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProductToken(t *testing.T) {
|
|
||||||
token := ProductToken()
|
|
||||||
// Telegraf version depends on the call to SetVersion, it cannot be set
|
|
||||||
// multiple times and is not thread-safe.
|
|
||||||
re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`)
|
|
||||||
require.True(t, re.MatchString(token), token)
|
|
||||||
}
|
|
||||||
|
|
|
@ -0,0 +1,54 @@
|
||||||
|
package limiter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRateLimiter(t *testing.T) {
|
||||||
|
r := NewRateLimiter(5, time.Second)
|
||||||
|
ticker := time.NewTicker(time.Millisecond * 75)
|
||||||
|
|
||||||
|
// test that we can only get 5 receives from the rate limiter
|
||||||
|
counter := 0
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-r.C:
|
||||||
|
counter++
|
||||||
|
case <-ticker.C:
|
||||||
|
break outer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, 5, counter)
|
||||||
|
r.Stop()
|
||||||
|
// verify that the Stop function closes the channel.
|
||||||
|
_, ok := <-r.C
|
||||||
|
assert.False(t, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRateLimiterMultipleIterations(t *testing.T) {
|
||||||
|
r := NewRateLimiter(5, time.Millisecond*50)
|
||||||
|
ticker := time.NewTicker(time.Millisecond * 250)
|
||||||
|
|
||||||
|
// test that we can get 15 receives from the rate limiter
|
||||||
|
counter := 0
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
break outer
|
||||||
|
case <-r.C:
|
||||||
|
counter++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, counter > 10)
|
||||||
|
r.Stop()
|
||||||
|
// verify that the Stop function closes the channel.
|
||||||
|
_, ok := <-r.C
|
||||||
|
assert.False(t, ok)
|
||||||
|
}
|
|
@ -3,7 +3,6 @@ package models
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
|
||||||
"github.com/influxdata/telegraf/filter"
|
"github.com/influxdata/telegraf/filter"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -79,13 +78,13 @@ func (f *Filter) Compile() error {
|
||||||
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
return fmt.Errorf("Error compiling 'taginclude', %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range f.TagDrop {
|
for i, _ := range f.TagDrop {
|
||||||
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
|
f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
return fmt.Errorf("Error compiling 'tagdrop', %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i := range f.TagPass {
|
for i, _ := range f.TagPass {
|
||||||
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
|
f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
return fmt.Errorf("Error compiling 'tagpass', %s", err)
|
||||||
|
@ -94,35 +93,45 @@ func (f *Filter) Compile() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Select returns true if the metric matches according to the
|
// Apply applies the filter to the given measurement name, fields map, and
|
||||||
// namepass/namedrop and tagpass/tagdrop filters. The metric is not modified.
|
// tags map. It will return false if the metric should be "filtered out", and
|
||||||
func (f *Filter) Select(metric telegraf.Metric) bool {
|
// true if the metric should "pass".
|
||||||
|
// It will modify tags & fields in-place if they need to be deleted.
|
||||||
|
func (f *Filter) Apply(
|
||||||
|
measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
) bool {
|
||||||
if !f.isActive {
|
if !f.isActive {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if !f.shouldNamePass(metric.Name()) {
|
// check if the measurement name should pass
|
||||||
|
if !f.shouldNamePass(measurement) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !f.shouldTagsPass(metric.TagList()) {
|
// check if the tags should pass
|
||||||
|
if !f.shouldTagsPass(tags) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// filter fields
|
||||||
|
for fieldkey, _ := range fields {
|
||||||
|
if !f.shouldFieldPass(fieldkey) {
|
||||||
|
delete(fields, fieldkey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// filter tags
|
||||||
|
f.filterTags(tags)
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Modify removes any tags and fields from the metric according to the
|
|
||||||
// fieldpass/fielddrop and taginclude/tagexclude filters.
|
|
||||||
func (f *Filter) Modify(metric telegraf.Metric) {
|
|
||||||
if !f.isActive {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
f.filterFields(metric)
|
|
||||||
f.filterTags(metric)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsActive checking if filter is active
|
// IsActive checking if filter is active
|
||||||
func (f *Filter) IsActive() bool {
|
func (f *Filter) IsActive() bool {
|
||||||
return f.isActive
|
return f.isActive
|
||||||
|
@ -131,6 +140,7 @@ func (f *Filter) IsActive() bool {
|
||||||
// shouldNamePass returns true if the metric should pass, false if should drop
|
// shouldNamePass returns true if the metric should pass, false if should drop
|
||||||
// based on the drop/pass filter parameters
|
// based on the drop/pass filter parameters
|
||||||
func (f *Filter) shouldNamePass(key string) bool {
|
func (f *Filter) shouldNamePass(key string) bool {
|
||||||
|
|
||||||
pass := func(f *Filter) bool {
|
pass := func(f *Filter) bool {
|
||||||
if f.namePass.Match(key) {
|
if f.namePass.Match(key) {
|
||||||
return true
|
return true
|
||||||
|
@ -159,32 +169,47 @@ func (f *Filter) shouldNamePass(key string) bool {
|
||||||
// shouldFieldPass returns true if the metric should pass, false if should drop
|
// shouldFieldPass returns true if the metric should pass, false if should drop
|
||||||
// based on the drop/pass filter parameters
|
// based on the drop/pass filter parameters
|
||||||
func (f *Filter) shouldFieldPass(key string) bool {
|
func (f *Filter) shouldFieldPass(key string) bool {
|
||||||
if f.fieldPass != nil && f.fieldDrop != nil {
|
|
||||||
return f.fieldPass.Match(key) && !f.fieldDrop.Match(key)
|
pass := func(f *Filter) bool {
|
||||||
} else if f.fieldPass != nil {
|
if f.fieldPass.Match(key) {
|
||||||
return f.fieldPass.Match(key)
|
return true
|
||||||
} else if f.fieldDrop != nil {
|
|
||||||
return !f.fieldDrop.Match(key)
|
|
||||||
}
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
drop := func(f *Filter) bool {
|
||||||
|
if f.fieldDrop.Match(key) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.fieldPass != nil && f.fieldDrop != nil {
|
||||||
|
return pass(f) && drop(f)
|
||||||
|
} else if f.fieldPass != nil {
|
||||||
|
return pass(f)
|
||||||
|
} else if f.fieldDrop != nil {
|
||||||
|
return drop(f)
|
||||||
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldTagsPass returns true if the metric should pass, false if should drop
|
// shouldTagsPass returns true if the metric should pass, false if should drop
|
||||||
// based on the tagdrop/tagpass filter parameters
|
// based on the tagdrop/tagpass filter parameters
|
||||||
func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
|
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
|
||||||
|
|
||||||
pass := func(f *Filter) bool {
|
pass := func(f *Filter) bool {
|
||||||
for _, pat := range f.TagPass {
|
for _, pat := range f.TagPass {
|
||||||
if pat.filter == nil {
|
if pat.filter == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, tag := range tags {
|
if tagval, ok := tags[pat.Name]; ok {
|
||||||
if tag.Key == pat.Name {
|
if pat.filter.Match(tagval) {
|
||||||
if pat.filter.Match(tag.Value) {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -193,14 +218,12 @@ func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
|
||||||
if pat.filter == nil {
|
if pat.filter == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, tag := range tags {
|
if tagval, ok := tags[pat.Name]; ok {
|
||||||
if tag.Key == pat.Name {
|
if pat.filter.Match(tagval) {
|
||||||
if pat.filter.Match(tag.Value) {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -219,42 +242,22 @@ func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// filterFields removes fields according to fieldpass/fielddrop.
|
// Apply TagInclude and TagExclude filters.
|
||||||
func (f *Filter) filterFields(metric telegraf.Metric) {
|
// modifies the tags map in-place.
|
||||||
filterKeys := []string{}
|
func (f *Filter) filterTags(tags map[string]string) {
|
||||||
for _, field := range metric.FieldList() {
|
|
||||||
if !f.shouldFieldPass(field.Key) {
|
|
||||||
filterKeys = append(filterKeys, field.Key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range filterKeys {
|
|
||||||
metric.RemoveField(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// filterTags removes tags according to taginclude/tagexclude.
|
|
||||||
func (f *Filter) filterTags(metric telegraf.Metric) {
|
|
||||||
filterKeys := []string{}
|
|
||||||
if f.tagInclude != nil {
|
if f.tagInclude != nil {
|
||||||
for _, tag := range metric.TagList() {
|
for k, _ := range tags {
|
||||||
if !f.tagInclude.Match(tag.Key) {
|
if !f.tagInclude.Match(k) {
|
||||||
filterKeys = append(filterKeys, tag.Key)
|
delete(tags, k)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, key := range filterKeys {
|
|
||||||
metric.RemoveTag(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.tagExclude != nil {
|
if f.tagExclude != nil {
|
||||||
for _, tag := range metric.TagList() {
|
for k, _ := range tags {
|
||||||
if f.tagExclude.Match(tag.Key) {
|
if f.tagExclude.Match(k) {
|
||||||
filterKeys = append(filterKeys, tag.Key)
|
delete(tags, k)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, key := range filterKeys {
|
|
||||||
metric.RemoveTag(key)
|
|
||||||
}
|
|
||||||
}
|
}
|
|
@ -2,30 +2,22 @@ package models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/influxdata/telegraf/metric"
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFilter_ApplyEmpty(t *testing.T) {
|
func TestFilter_ApplyEmpty(t *testing.T) {
|
||||||
f := Filter{}
|
f := Filter{}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.False(t, f.IsActive())
|
assert.False(t, f.IsActive())
|
||||||
|
|
||||||
m, err := metric.New("m",
|
assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{}))
|
||||||
map[string]string{},
|
|
||||||
map[string]interface{}{"value": int64(1)},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, f.Select(m))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_ApplyTagsDontPass(t *testing.T) {
|
func TestFilter_ApplyTagsDontPass(t *testing.T) {
|
||||||
filters := []TagFilter{
|
filters := []TagFilter{
|
||||||
{
|
TagFilter{
|
||||||
Name: "cpu",
|
Name: "cpu",
|
||||||
Filter: []string{"cpu-*"},
|
Filter: []string{"cpu-*"},
|
||||||
},
|
},
|
||||||
|
@ -35,14 +27,11 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) {
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.True(t, f.IsActive())
|
assert.True(t, f.IsActive())
|
||||||
|
|
||||||
m, err := metric.New("m",
|
assert.False(t, f.Apply("m",
|
||||||
map[string]string{"cpu": "cpu-total"},
|
|
||||||
map[string]interface{}{"value": int64(1)},
|
map[string]interface{}{"value": int64(1)},
|
||||||
time.Now())
|
map[string]string{"cpu": "cpu-total"}))
|
||||||
require.NoError(t, err)
|
|
||||||
require.False(t, f.Select(m))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_ApplyDeleteFields(t *testing.T) {
|
func TestFilter_ApplyDeleteFields(t *testing.T) {
|
||||||
|
@ -51,19 +40,11 @@ func TestFilter_ApplyDeleteFields(t *testing.T) {
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.True(t, f.IsActive())
|
assert.True(t, f.IsActive())
|
||||||
|
|
||||||
m, err := metric.New("m",
|
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
||||||
map[string]string{},
|
assert.True(t, f.Apply("m", fields, nil))
|
||||||
map[string]interface{}{
|
assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields)
|
||||||
"value": int64(1),
|
|
||||||
"value2": int64(2),
|
|
||||||
},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, f.Select(m))
|
|
||||||
f.Modify(m)
|
|
||||||
require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
|
func TestFilter_ApplyDeleteAllFields(t *testing.T) {
|
||||||
|
@ -72,19 +53,10 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) {
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
require.True(t, f.IsActive())
|
assert.True(t, f.IsActive())
|
||||||
|
|
||||||
m, err := metric.New("m",
|
fields := map[string]interface{}{"value": int64(1), "value2": int64(2)}
|
||||||
map[string]string{},
|
assert.False(t, f.Apply("m", fields, nil))
|
||||||
map[string]interface{}{
|
|
||||||
"value": int64(1),
|
|
||||||
"value2": int64(2),
|
|
||||||
},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.True(t, f.Select(m))
|
|
||||||
f.Modify(m)
|
|
||||||
require.Len(t, m.FieldList(), 0)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_Empty(t *testing.T) {
|
func TestFilter_Empty(t *testing.T) {
|
||||||
|
@ -97,7 +69,7 @@ func TestFilter_Empty(t *testing.T) {
|
||||||
"foo_bar",
|
"foo_bar",
|
||||||
"foo.bar",
|
"foo.bar",
|
||||||
"foo-bar",
|
"foo-bar",
|
||||||
"supercalifragilisticexpialidocious",
|
"supercalifradjulisticexpialidocious",
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, measurement := range measurements {
|
for _, measurement := range measurements {
|
||||||
|
@ -245,11 +217,11 @@ func TestFilter_FieldDrop(t *testing.T) {
|
||||||
|
|
||||||
func TestFilter_TagPass(t *testing.T) {
|
func TestFilter_TagPass(t *testing.T) {
|
||||||
filters := []TagFilter{
|
filters := []TagFilter{
|
||||||
{
|
TagFilter{
|
||||||
Name: "cpu",
|
Name: "cpu",
|
||||||
Filter: []string{"cpu-*"},
|
Filter: []string{"cpu-*"},
|
||||||
},
|
},
|
||||||
{
|
TagFilter{
|
||||||
Name: "mem",
|
Name: "mem",
|
||||||
Filter: []string{"mem_free"},
|
Filter: []string{"mem_free"},
|
||||||
}}
|
}}
|
||||||
|
@ -258,20 +230,20 @@ func TestFilter_TagPass(t *testing.T) {
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
passes := [][]*telegraf.Tag{
|
passes := []map[string]string{
|
||||||
{{Key: "cpu", Value: "cpu-total"}},
|
{"cpu": "cpu-total"},
|
||||||
{{Key: "cpu", Value: "cpu-0"}},
|
{"cpu": "cpu-0"},
|
||||||
{{Key: "cpu", Value: "cpu-1"}},
|
{"cpu": "cpu-1"},
|
||||||
{{Key: "cpu", Value: "cpu-2"}},
|
{"cpu": "cpu-2"},
|
||||||
{{Key: "mem", Value: "mem_free"}},
|
{"mem": "mem_free"},
|
||||||
}
|
}
|
||||||
|
|
||||||
drops := [][]*telegraf.Tag{
|
drops := []map[string]string{
|
||||||
{{Key: "cpu", Value: "cputotal"}},
|
{"cpu": "cputotal"},
|
||||||
{{Key: "cpu", Value: "cpu0"}},
|
{"cpu": "cpu0"},
|
||||||
{{Key: "cpu", Value: "cpu1"}},
|
{"cpu": "cpu1"},
|
||||||
{{Key: "cpu", Value: "cpu2"}},
|
{"cpu": "cpu2"},
|
||||||
{{Key: "mem", Value: "mem_used"}},
|
{"mem": "mem_used"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tags := range passes {
|
for _, tags := range passes {
|
||||||
|
@ -289,11 +261,11 @@ func TestFilter_TagPass(t *testing.T) {
|
||||||
|
|
||||||
func TestFilter_TagDrop(t *testing.T) {
|
func TestFilter_TagDrop(t *testing.T) {
|
||||||
filters := []TagFilter{
|
filters := []TagFilter{
|
||||||
{
|
TagFilter{
|
||||||
Name: "cpu",
|
Name: "cpu",
|
||||||
Filter: []string{"cpu-*"},
|
Filter: []string{"cpu-*"},
|
||||||
},
|
},
|
||||||
{
|
TagFilter{
|
||||||
Name: "mem",
|
Name: "mem",
|
||||||
Filter: []string{"mem_free"},
|
Filter: []string{"mem_free"},
|
||||||
}}
|
}}
|
||||||
|
@ -302,20 +274,20 @@ func TestFilter_TagDrop(t *testing.T) {
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
drops := [][]*telegraf.Tag{
|
drops := []map[string]string{
|
||||||
{{Key: "cpu", Value: "cpu-total"}},
|
{"cpu": "cpu-total"},
|
||||||
{{Key: "cpu", Value: "cpu-0"}},
|
{"cpu": "cpu-0"},
|
||||||
{{Key: "cpu", Value: "cpu-1"}},
|
{"cpu": "cpu-1"},
|
||||||
{{Key: "cpu", Value: "cpu-2"}},
|
{"cpu": "cpu-2"},
|
||||||
{{Key: "mem", Value: "mem_free"}},
|
{"mem": "mem_free"},
|
||||||
}
|
}
|
||||||
|
|
||||||
passes := [][]*telegraf.Tag{
|
passes := []map[string]string{
|
||||||
{{Key: "cpu", Value: "cputotal"}},
|
{"cpu": "cputotal"},
|
||||||
{{Key: "cpu", Value: "cpu0"}},
|
{"cpu": "cpu0"},
|
||||||
{{Key: "cpu", Value: "cpu1"}},
|
{"cpu": "cpu1"},
|
||||||
{{Key: "cpu", Value: "cpu2"}},
|
{"cpu": "cpu2"},
|
||||||
{{Key: "mem", Value: "mem_used"}},
|
{"mem": "mem_used"},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tags := range passes {
|
for _, tags := range passes {
|
||||||
|
@ -332,70 +304,58 @@ func TestFilter_TagDrop(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_FilterTagsNoMatches(t *testing.T) {
|
func TestFilter_FilterTagsNoMatches(t *testing.T) {
|
||||||
m, err := metric.New("m",
|
pretags := map[string]string{
|
||||||
map[string]string{
|
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
},
|
}
|
||||||
map[string]interface{}{"value": int64(1)},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
f := Filter{
|
f := Filter{
|
||||||
TagExclude: []string{"nomatch"},
|
TagExclude: []string{"nomatch"},
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
f.filterTags(m)
|
f.filterTags(pretags)
|
||||||
require.Equal(t, map[string]string{
|
assert.Equal(t, map[string]string{
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
}, m.Tags())
|
}, pretags)
|
||||||
|
|
||||||
f = Filter{
|
f = Filter{
|
||||||
TagInclude: []string{"nomatch"},
|
TagInclude: []string{"nomatch"},
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
f.filterTags(m)
|
f.filterTags(pretags)
|
||||||
require.Equal(t, map[string]string{}, m.Tags())
|
assert.Equal(t, map[string]string{}, pretags)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilter_FilterTagsMatches(t *testing.T) {
|
func TestFilter_FilterTagsMatches(t *testing.T) {
|
||||||
m, err := metric.New("m",
|
pretags := map[string]string{
|
||||||
map[string]string{
|
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
},
|
}
|
||||||
map[string]interface{}{"value": int64(1)},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
f := Filter{
|
f := Filter{
|
||||||
TagExclude: []string{"ho*"},
|
TagExclude: []string{"ho*"},
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
f.filterTags(m)
|
f.filterTags(pretags)
|
||||||
require.Equal(t, map[string]string{
|
assert.Equal(t, map[string]string{
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
}, m.Tags())
|
}, pretags)
|
||||||
|
|
||||||
m, err = metric.New("m",
|
pretags = map[string]string{
|
||||||
map[string]string{
|
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
},
|
}
|
||||||
map[string]interface{}{"value": int64(1)},
|
|
||||||
time.Now())
|
|
||||||
require.NoError(t, err)
|
|
||||||
f = Filter{
|
f = Filter{
|
||||||
TagInclude: []string{"my*"},
|
TagInclude: []string{"my*"},
|
||||||
}
|
}
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
f.filterTags(m)
|
f.filterTags(pretags)
|
||||||
require.Equal(t, map[string]string{
|
assert.Equal(t, map[string]string{
|
||||||
"mytag": "foobar",
|
"mytag": "foobar",
|
||||||
}, m.Tags())
|
}, pretags)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestFilter_FilterNamePassAndDrop used for check case when
|
// TestFilter_FilterNamePassAndDrop used for check case when
|
||||||
|
@ -414,7 +374,7 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) {
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
for i, name := range inputData {
|
for i, name := range inputData {
|
||||||
require.Equal(t, f.shouldNamePass(name), expectedResult[i])
|
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -434,7 +394,7 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
for i, field := range inputData {
|
for i, field := range inputData {
|
||||||
require.Equal(t, f.shouldFieldPass(field), expectedResult[i])
|
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -442,28 +402,29 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
|
||||||
// both parameters were defined
|
// both parameters were defined
|
||||||
// see: https://github.com/influxdata/telegraf/issues/2860
|
// see: https://github.com/influxdata/telegraf/issues/2860
|
||||||
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
|
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
|
||||||
inputData := [][]*telegraf.Tag{
|
|
||||||
{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "3"}},
|
inputData := []map[string]string{
|
||||||
{{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "2"}},
|
{"tag1": "1", "tag2": "3"},
|
||||||
{{Key: "tag1", Value: "2"}, {Key: "tag2", Value: "1"}},
|
{"tag1": "1", "tag2": "2"},
|
||||||
{{Key: "tag1", Value: "4"}, {Key: "tag2", Value: "1"}},
|
{"tag1": "2", "tag2": "1"},
|
||||||
|
{"tag1": "4", "tag2": "1"},
|
||||||
}
|
}
|
||||||
|
|
||||||
expectedResult := []bool{false, true, false, false}
|
expectedResult := []bool{false, true, false, false}
|
||||||
|
|
||||||
filterPass := []TagFilter{
|
filterPass := []TagFilter{
|
||||||
{
|
TagFilter{
|
||||||
Name: "tag1",
|
Name: "tag1",
|
||||||
Filter: []string{"1", "4"},
|
Filter: []string{"1", "4"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
filterDrop := []TagFilter{
|
filterDrop := []TagFilter{
|
||||||
{
|
TagFilter{
|
||||||
Name: "tag1",
|
Name: "tag1",
|
||||||
Filter: []string{"4"},
|
Filter: []string{"4"},
|
||||||
},
|
},
|
||||||
{
|
TagFilter{
|
||||||
Name: "tag2",
|
Name: "tag2",
|
||||||
Filter: []string{"3"},
|
Filter: []string{"3"},
|
||||||
},
|
},
|
||||||
|
@ -477,49 +438,7 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
|
||||||
require.NoError(t, f.Compile())
|
require.NoError(t, f.Compile())
|
||||||
|
|
||||||
for i, tag := range inputData {
|
for i, tag := range inputData {
|
||||||
require.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
|
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkFilter(b *testing.B) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
filter Filter
|
|
||||||
metric telegraf.Metric
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "empty filter",
|
|
||||||
filter: Filter{},
|
|
||||||
metric: testutil.MustMetric("cpu",
|
|
||||||
map[string]string{},
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": 42,
|
|
||||||
},
|
|
||||||
time.Unix(0, 0),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "namepass",
|
|
||||||
filter: Filter{
|
|
||||||
NamePass: []string{"cpu"},
|
|
||||||
},
|
|
||||||
metric: testutil.MustMetric("cpu",
|
|
||||||
map[string]string{},
|
|
||||||
map[string]interface{}{
|
|
||||||
"value": 42,
|
|
||||||
},
|
|
||||||
time.Unix(0, 0),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
b.Run(tt.name, func(b *testing.B) {
|
|
||||||
require.NoError(b, tt.filter.Compile())
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
tt.filter.Select(tt.metric)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,166 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
// makemetric is used by both RunningAggregator & RunningInput
|
||||||
|
// to make metrics.
|
||||||
|
// nameOverride: override the name of the measurement being made.
|
||||||
|
// namePrefix: add this prefix to each measurement name.
|
||||||
|
// nameSuffix: add this suffix to each measurement name.
|
||||||
|
// pluginTags: these are tags that are specific to this plugin.
|
||||||
|
// daemonTags: these are daemon-wide global tags, and get applied after pluginTags.
|
||||||
|
// filter: this is a filter to apply to each metric being made.
|
||||||
|
// applyFilter: if false, the above filter is not applied to each metric.
|
||||||
|
// This is used by Aggregators, because aggregators use filters
|
||||||
|
// on incoming metrics instead of on created metrics.
|
||||||
|
// TODO refactor this to not have such a huge func signature.
|
||||||
|
func makemetric(
|
||||||
|
measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
nameOverride string,
|
||||||
|
namePrefix string,
|
||||||
|
nameSuffix string,
|
||||||
|
pluginTags map[string]string,
|
||||||
|
daemonTags map[string]string,
|
||||||
|
filter Filter,
|
||||||
|
applyFilter bool,
|
||||||
|
mType telegraf.ValueType,
|
||||||
|
t time.Time,
|
||||||
|
) telegraf.Metric {
|
||||||
|
if len(fields) == 0 || len(measurement) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if tags == nil {
|
||||||
|
tags = make(map[string]string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override measurement name if set
|
||||||
|
if len(nameOverride) != 0 {
|
||||||
|
measurement = nameOverride
|
||||||
|
}
|
||||||
|
// Apply measurement prefix and suffix if set
|
||||||
|
if len(namePrefix) != 0 {
|
||||||
|
measurement = namePrefix + measurement
|
||||||
|
}
|
||||||
|
if len(nameSuffix) != 0 {
|
||||||
|
measurement = measurement + nameSuffix
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply plugin-wide tags if set
|
||||||
|
for k, v := range pluginTags {
|
||||||
|
if _, ok := tags[k]; !ok {
|
||||||
|
tags[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Apply daemon-wide tags if set
|
||||||
|
for k, v := range daemonTags {
|
||||||
|
if _, ok := tags[k]; !ok {
|
||||||
|
tags[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply the metric filter(s)
|
||||||
|
// for aggregators, the filter does not get applied when the metric is made.
|
||||||
|
// instead, the filter is applied to metric incoming into the plugin.
|
||||||
|
// ie, it gets applied in the RunningAggregator.Apply function.
|
||||||
|
if applyFilter {
|
||||||
|
if ok := filter.Apply(measurement, fields, tags); !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range tags {
|
||||||
|
if strings.HasSuffix(k, `\`) {
|
||||||
|
log.Printf("D! Measurement [%s] tag [%s] "+
|
||||||
|
"ends with a backslash, skipping", measurement, k)
|
||||||
|
delete(tags, k)
|
||||||
|
continue
|
||||||
|
} else if strings.HasSuffix(v, `\`) {
|
||||||
|
log.Printf("D! Measurement [%s] tag [%s] has a value "+
|
||||||
|
"ending with a backslash, skipping", measurement, k)
|
||||||
|
delete(tags, k)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range fields {
|
||||||
|
if strings.HasSuffix(k, `\`) {
|
||||||
|
log.Printf("D! Measurement [%s] field [%s] "+
|
||||||
|
"ends with a backslash, skipping", measurement, k)
|
||||||
|
delete(fields, k)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Validate uint64 and float64 fields
|
||||||
|
// convert all int & uint types to int64
|
||||||
|
switch val := v.(type) {
|
||||||
|
case nil:
|
||||||
|
// delete nil fields
|
||||||
|
delete(fields, k)
|
||||||
|
case uint:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case uint8:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case uint16:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case uint32:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case int:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case int8:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case int16:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case int32:
|
||||||
|
fields[k] = int64(val)
|
||||||
|
continue
|
||||||
|
case uint64:
|
||||||
|
// InfluxDB does not support writing uint64
|
||||||
|
if val < uint64(9223372036854775808) {
|
||||||
|
fields[k] = int64(val)
|
||||||
|
} else {
|
||||||
|
fields[k] = int64(9223372036854775807)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case float32:
|
||||||
|
fields[k] = float64(val)
|
||||||
|
continue
|
||||||
|
case float64:
|
||||||
|
// NaNs are invalid values in influxdb, skip measurement
|
||||||
|
if math.IsNaN(val) || math.IsInf(val, 0) {
|
||||||
|
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
|
||||||
|
"field, skipping",
|
||||||
|
measurement, k)
|
||||||
|
delete(fields, k)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case string:
|
||||||
|
fields[k] = v
|
||||||
|
default:
|
||||||
|
fields[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m, err := metric.New(measurement, tags, fields, t, mType)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
|
@ -0,0 +1,166 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RunningAggregator struct {
|
||||||
|
a telegraf.Aggregator
|
||||||
|
Config *AggregatorConfig
|
||||||
|
|
||||||
|
metrics chan telegraf.Metric
|
||||||
|
|
||||||
|
periodStart time.Time
|
||||||
|
periodEnd time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRunningAggregator(
|
||||||
|
a telegraf.Aggregator,
|
||||||
|
conf *AggregatorConfig,
|
||||||
|
) *RunningAggregator {
|
||||||
|
return &RunningAggregator{
|
||||||
|
a: a,
|
||||||
|
Config: conf,
|
||||||
|
metrics: make(chan telegraf.Metric, 100),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AggregatorConfig containing configuration parameters for the running
|
||||||
|
// aggregator plugin.
|
||||||
|
type AggregatorConfig struct {
|
||||||
|
Name string
|
||||||
|
|
||||||
|
DropOriginal bool
|
||||||
|
NameOverride string
|
||||||
|
MeasurementPrefix string
|
||||||
|
MeasurementSuffix string
|
||||||
|
Tags map[string]string
|
||||||
|
Filter Filter
|
||||||
|
|
||||||
|
Period time.Duration
|
||||||
|
Delay time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningAggregator) Name() string {
|
||||||
|
return "aggregators." + r.Config.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningAggregator) MakeMetric(
|
||||||
|
measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
mType telegraf.ValueType,
|
||||||
|
t time.Time,
|
||||||
|
) telegraf.Metric {
|
||||||
|
m := makemetric(
|
||||||
|
measurement,
|
||||||
|
fields,
|
||||||
|
tags,
|
||||||
|
r.Config.NameOverride,
|
||||||
|
r.Config.MeasurementPrefix,
|
||||||
|
r.Config.MeasurementSuffix,
|
||||||
|
r.Config.Tags,
|
||||||
|
nil,
|
||||||
|
r.Config.Filter,
|
||||||
|
false,
|
||||||
|
mType,
|
||||||
|
t,
|
||||||
|
)
|
||||||
|
|
||||||
|
if m != nil {
|
||||||
|
m.SetAggregate(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add applies the given metric to the aggregator.
|
||||||
|
// Before applying to the plugin, it will run any defined filters on the metric.
|
||||||
|
// Apply returns true if the original metric should be dropped.
|
||||||
|
func (r *RunningAggregator) Add(in telegraf.Metric) bool {
|
||||||
|
if r.Config.Filter.IsActive() {
|
||||||
|
// check if the aggregator should apply this metric
|
||||||
|
name := in.Name()
|
||||||
|
fields := in.Fields()
|
||||||
|
tags := in.Tags()
|
||||||
|
t := in.Time()
|
||||||
|
if ok := r.Config.Filter.Apply(name, fields, tags); !ok {
|
||||||
|
// aggregator should not apply this metric
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
in, _ = metric.New(name, tags, fields, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.metrics <- in
|
||||||
|
return r.Config.DropOriginal
|
||||||
|
}
|
||||||
|
func (r *RunningAggregator) add(in telegraf.Metric) {
|
||||||
|
r.a.Add(in)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningAggregator) push(acc telegraf.Accumulator) {
|
||||||
|
r.a.Push(acc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningAggregator) reset() {
|
||||||
|
r.a.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run runs the running aggregator, listens for incoming metrics, and waits
|
||||||
|
// for period ticks to tell it when to push and reset the aggregator.
|
||||||
|
func (r *RunningAggregator) Run(
|
||||||
|
acc telegraf.Accumulator,
|
||||||
|
shutdown chan struct{},
|
||||||
|
) {
|
||||||
|
// The start of the period is truncated to the nearest second.
|
||||||
|
//
|
||||||
|
// Every metric then gets it's timestamp checked and is dropped if it
|
||||||
|
// is not within:
|
||||||
|
//
|
||||||
|
// start < t < end + truncation + delay
|
||||||
|
//
|
||||||
|
// So if we start at now = 00:00.2 with a 10s period and 0.3s delay:
|
||||||
|
// now = 00:00.2
|
||||||
|
// start = 00:00
|
||||||
|
// truncation = 00:00.2
|
||||||
|
// end = 00:10
|
||||||
|
// 1st interval: 00:00 - 00:10.5
|
||||||
|
// 2nd interval: 00:10 - 00:20.5
|
||||||
|
// etc.
|
||||||
|
//
|
||||||
|
now := time.Now()
|
||||||
|
r.periodStart = now.Truncate(time.Second)
|
||||||
|
truncation := now.Sub(r.periodStart)
|
||||||
|
r.periodEnd = r.periodStart.Add(r.Config.Period)
|
||||||
|
time.Sleep(r.Config.Delay)
|
||||||
|
periodT := time.NewTicker(r.Config.Period)
|
||||||
|
defer periodT.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-shutdown:
|
||||||
|
if len(r.metrics) > 0 {
|
||||||
|
// wait until metrics are flushed before exiting
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return
|
||||||
|
case m := <-r.metrics:
|
||||||
|
if m.Time().Before(r.periodStart) ||
|
||||||
|
m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) {
|
||||||
|
// the metric is outside the current aggregation period, so
|
||||||
|
// skip it.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r.add(m)
|
||||||
|
case <-periodT.C:
|
||||||
|
r.periodStart = r.periodEnd
|
||||||
|
r.periodEnd = r.periodStart.Add(r.Config.Period)
|
||||||
|
r.push(acc)
|
||||||
|
r.reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,256 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAdd(t *testing.T) {
|
||||||
|
a := &TestAggregator{}
|
||||||
|
ra := NewRunningAggregator(a, &AggregatorConfig{
|
||||||
|
Name: "TestRunningAggregator",
|
||||||
|
Filter: Filter{
|
||||||
|
NamePass: []string{"*"},
|
||||||
|
},
|
||||||
|
Period: time.Millisecond * 500,
|
||||||
|
})
|
||||||
|
assert.NoError(t, ra.Config.Filter.Compile())
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
go ra.Run(&acc, make(chan struct{}))
|
||||||
|
|
||||||
|
m := ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now().Add(time.Millisecond*150),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m))
|
||||||
|
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
if atomic.LoadInt64(&a.sum) > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
|
||||||
|
a := &TestAggregator{}
|
||||||
|
ra := NewRunningAggregator(a, &AggregatorConfig{
|
||||||
|
Name: "TestRunningAggregator",
|
||||||
|
Filter: Filter{
|
||||||
|
NamePass: []string{"*"},
|
||||||
|
},
|
||||||
|
Period: time.Millisecond * 500,
|
||||||
|
})
|
||||||
|
assert.NoError(t, ra.Config.Filter.Compile())
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
go ra.Run(&acc, make(chan struct{}))
|
||||||
|
|
||||||
|
// metric before current period
|
||||||
|
m := ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now().Add(-time.Hour),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m))
|
||||||
|
|
||||||
|
// metric after current period
|
||||||
|
m = ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now().Add(time.Hour),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m))
|
||||||
|
|
||||||
|
// "now" metric
|
||||||
|
m = ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now().Add(time.Millisecond*50),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m))
|
||||||
|
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
if atomic.LoadInt64(&a.sum) > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddAndPushOnePeriod(t *testing.T) {
|
||||||
|
a := &TestAggregator{}
|
||||||
|
ra := NewRunningAggregator(a, &AggregatorConfig{
|
||||||
|
Name: "TestRunningAggregator",
|
||||||
|
Filter: Filter{
|
||||||
|
NamePass: []string{"*"},
|
||||||
|
},
|
||||||
|
Period: time.Millisecond * 500,
|
||||||
|
})
|
||||||
|
assert.NoError(t, ra.Config.Filter.Compile())
|
||||||
|
acc := testutil.Accumulator{}
|
||||||
|
shutdown := make(chan struct{})
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
ra.Run(&acc, shutdown)
|
||||||
|
}()
|
||||||
|
|
||||||
|
m := ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now().Add(time.Millisecond*100),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m))
|
||||||
|
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
if acc.NMetrics() > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)})
|
||||||
|
|
||||||
|
close(shutdown)
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddDropOriginal(t *testing.T) {
|
||||||
|
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
|
||||||
|
Name: "TestRunningAggregator",
|
||||||
|
Filter: Filter{
|
||||||
|
NamePass: []string{"RI*"},
|
||||||
|
},
|
||||||
|
DropOriginal: true,
|
||||||
|
})
|
||||||
|
assert.NoError(t, ra.Config.Filter.Compile())
|
||||||
|
|
||||||
|
m := ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now(),
|
||||||
|
)
|
||||||
|
assert.True(t, ra.Add(m))
|
||||||
|
|
||||||
|
// this metric name doesn't match the filter, so Add will return false
|
||||||
|
m2 := ra.MakeMetric(
|
||||||
|
"foobar",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
time.Now(),
|
||||||
|
)
|
||||||
|
assert.False(t, ra.Add(m2))
|
||||||
|
}
|
||||||
|
|
||||||
|
// make an untyped, counter, & gauge metric
|
||||||
|
func TestMakeMetricA(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
|
||||||
|
Name: "TestRunningAggregator",
|
||||||
|
})
|
||||||
|
assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())
|
||||||
|
|
||||||
|
m := ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Untyped,
|
||||||
|
)
|
||||||
|
|
||||||
|
m = ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Counter,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Counter,
|
||||||
|
)
|
||||||
|
|
||||||
|
m = ra.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Gauge,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Gauge,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
type TestAggregator struct {
|
||||||
|
sum int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestAggregator) Description() string { return "" }
|
||||||
|
func (t *TestAggregator) SampleConfig() string { return "" }
|
||||||
|
func (t *TestAggregator) Reset() {
|
||||||
|
atomic.StoreInt64(&t.sum, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestAggregator) Push(acc telegraf.Accumulator) {
|
||||||
|
acc.AddFields("TestMetric",
|
||||||
|
map[string]interface{}{"sum": t.sum},
|
||||||
|
map[string]string{},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestAggregator) Add(in telegraf.Metric) {
|
||||||
|
for _, v := range in.Fields() {
|
||||||
|
if vi, ok := v.(int64); ok {
|
||||||
|
atomic.AddInt64(&t.sum, vi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,96 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/selfstat"
|
||||||
|
)
|
||||||
|
|
||||||
|
var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{})
|
||||||
|
|
||||||
|
type RunningInput struct {
|
||||||
|
Input telegraf.Input
|
||||||
|
Config *InputConfig
|
||||||
|
|
||||||
|
trace bool
|
||||||
|
defaultTags map[string]string
|
||||||
|
|
||||||
|
MetricsGathered selfstat.Stat
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRunningInput(
|
||||||
|
input telegraf.Input,
|
||||||
|
config *InputConfig,
|
||||||
|
) *RunningInput {
|
||||||
|
return &RunningInput{
|
||||||
|
Input: input,
|
||||||
|
Config: config,
|
||||||
|
MetricsGathered: selfstat.Register(
|
||||||
|
"gather",
|
||||||
|
"metrics_gathered",
|
||||||
|
map[string]string{"input": config.Name},
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InputConfig containing a name, interval, and filter
|
||||||
|
type InputConfig struct {
|
||||||
|
Name string
|
||||||
|
NameOverride string
|
||||||
|
MeasurementPrefix string
|
||||||
|
MeasurementSuffix string
|
||||||
|
Tags map[string]string
|
||||||
|
Filter Filter
|
||||||
|
Interval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningInput) Name() string {
|
||||||
|
return "inputs." + r.Config.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeMetric either returns a metric, or returns nil if the metric doesn't
|
||||||
|
// need to be created (because of filtering, an error, etc.)
|
||||||
|
func (r *RunningInput) MakeMetric(
|
||||||
|
measurement string,
|
||||||
|
fields map[string]interface{},
|
||||||
|
tags map[string]string,
|
||||||
|
mType telegraf.ValueType,
|
||||||
|
t time.Time,
|
||||||
|
) telegraf.Metric {
|
||||||
|
m := makemetric(
|
||||||
|
measurement,
|
||||||
|
fields,
|
||||||
|
tags,
|
||||||
|
r.Config.NameOverride,
|
||||||
|
r.Config.MeasurementPrefix,
|
||||||
|
r.Config.MeasurementSuffix,
|
||||||
|
r.Config.Tags,
|
||||||
|
r.defaultTags,
|
||||||
|
r.Config.Filter,
|
||||||
|
true,
|
||||||
|
mType,
|
||||||
|
t,
|
||||||
|
)
|
||||||
|
|
||||||
|
if r.trace && m != nil {
|
||||||
|
fmt.Print("> " + m.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
r.MetricsGathered.Incr(1)
|
||||||
|
GlobalMetricsGathered.Incr(1)
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningInput) Trace() bool {
|
||||||
|
return r.trace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningInput) SetTrace(trace bool) {
|
||||||
|
r.trace = trace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunningInput) SetDefaultTags(tags map[string]string) {
|
||||||
|
r.defaultTags = tags
|
||||||
|
}
|
|
@ -0,0 +1,463 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMakeMetricNoFields(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Nil(t, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nil fields should get dropped
|
||||||
|
func TestMakeMetricNilFields(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": int(101),
|
||||||
|
"nil": nil,
|
||||||
|
},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// make an untyped, counter, & gauge metric
|
||||||
|
func TestMakeMetric(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Untyped,
|
||||||
|
)
|
||||||
|
|
||||||
|
m = ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Counter,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Counter,
|
||||||
|
)
|
||||||
|
|
||||||
|
m = ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Gauge,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
m.Type(),
|
||||||
|
telegraf.Gauge,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricWithPluginTags(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
nil,
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricFilteredOut(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
Tags: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
Filter: Filter{NamePass: []string{"foobar"}},
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
assert.NoError(t, ri.Config.Filter.Compile())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
nil,
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Nil(t, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricWithDaemonTags(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
ri.SetDefaultTags(map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// make an untyped, counter, & gauge metric
|
||||||
|
func TestMakeMetricInfFields(t *testing.T) {
|
||||||
|
inf := math.Inf(1)
|
||||||
|
ninf := math.Inf(-1)
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{
|
||||||
|
"value": int(101),
|
||||||
|
"inf": inf,
|
||||||
|
"ninf": ninf,
|
||||||
|
},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricAllFieldTypes(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
ri.SetTrace(true)
|
||||||
|
assert.Equal(t, true, ri.Trace())
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{
|
||||||
|
"a": int(10),
|
||||||
|
"b": int8(10),
|
||||||
|
"c": int16(10),
|
||||||
|
"d": int32(10),
|
||||||
|
"e": uint(10),
|
||||||
|
"f": uint8(10),
|
||||||
|
"g": uint16(10),
|
||||||
|
"h": uint32(10),
|
||||||
|
"i": uint64(10),
|
||||||
|
"j": float32(10),
|
||||||
|
"k": uint64(9223372036854775810),
|
||||||
|
"l": "foobar",
|
||||||
|
"m": true,
|
||||||
|
},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Contains(t, m.String(), "a=10i")
|
||||||
|
assert.Contains(t, m.String(), "b=10i")
|
||||||
|
assert.Contains(t, m.String(), "c=10i")
|
||||||
|
assert.Contains(t, m.String(), "d=10i")
|
||||||
|
assert.Contains(t, m.String(), "e=10i")
|
||||||
|
assert.Contains(t, m.String(), "f=10i")
|
||||||
|
assert.Contains(t, m.String(), "g=10i")
|
||||||
|
assert.Contains(t, m.String(), "h=10i")
|
||||||
|
assert.Contains(t, m.String(), "i=10i")
|
||||||
|
assert.Contains(t, m.String(), "j=10")
|
||||||
|
assert.NotContains(t, m.String(), "j=10i")
|
||||||
|
assert.Contains(t, m.String(), "k=9223372036854775807i")
|
||||||
|
assert.Contains(t, m.String(), "l=\"foobar\"")
|
||||||
|
assert.Contains(t, m.String(), "m=true")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricNameOverride(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
NameOverride: "foobar",
|
||||||
|
})
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricNamePrefix(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
MeasurementPrefix: "foobar_",
|
||||||
|
})
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetricNameSuffix(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
MeasurementSuffix: "_foobar",
|
||||||
|
})
|
||||||
|
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
"RITest",
|
||||||
|
map[string]interface{}{"value": int(101)},
|
||||||
|
map[string]string{},
|
||||||
|
telegraf.Untyped,
|
||||||
|
now,
|
||||||
|
)
|
||||||
|
assert.Equal(
|
||||||
|
t,
|
||||||
|
fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
|
||||||
|
m.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakeMetric_TrailingSlash(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
measurement string
|
||||||
|
fields map[string]interface{}
|
||||||
|
tags map[string]string
|
||||||
|
expectedNil bool
|
||||||
|
expectedMeasurement string
|
||||||
|
expectedFields map[string]interface{}
|
||||||
|
expectedTags map[string]string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Measurement cannot have trailing slash",
|
||||||
|
measurement: `cpu\`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
tags: map[string]string{},
|
||||||
|
expectedNil: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Field key with trailing slash dropped",
|
||||||
|
measurement: `cpu`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
`bad\`: `xyzzy`,
|
||||||
|
},
|
||||||
|
tags: map[string]string{},
|
||||||
|
expectedMeasurement: `cpu`,
|
||||||
|
expectedFields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
expectedTags: map[string]string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Field value with trailing slash okay",
|
||||||
|
measurement: `cpu`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
"ok": `xyzzy\`,
|
||||||
|
},
|
||||||
|
tags: map[string]string{},
|
||||||
|
expectedMeasurement: `cpu`,
|
||||||
|
expectedFields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
"ok": `xyzzy\`,
|
||||||
|
},
|
||||||
|
expectedTags: map[string]string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Must have one field after dropped",
|
||||||
|
measurement: `cpu`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"bad": math.NaN(),
|
||||||
|
},
|
||||||
|
tags: map[string]string{},
|
||||||
|
expectedNil: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Tag key with trailing slash dropped",
|
||||||
|
measurement: `cpu`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
`host\`: "localhost",
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
expectedMeasurement: `cpu`,
|
||||||
|
expectedFields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
expectedTags: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Tag value with trailing slash dropped",
|
||||||
|
measurement: `cpu`,
|
||||||
|
fields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
tags: map[string]string{
|
||||||
|
`host`: `localhost\`,
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
expectedMeasurement: `cpu`,
|
||||||
|
expectedFields: map[string]interface{}{
|
||||||
|
"value": int64(42),
|
||||||
|
},
|
||||||
|
expectedTags: map[string]string{
|
||||||
|
"a": "x",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ri := NewRunningInput(&testInput{}, &InputConfig{
|
||||||
|
Name: "TestRunningInput",
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
m := ri.MakeMetric(
|
||||||
|
tc.measurement,
|
||||||
|
tc.fields,
|
||||||
|
tc.tags,
|
||||||
|
telegraf.Untyped,
|
||||||
|
now)
|
||||||
|
|
||||||
|
if tc.expectedNil {
|
||||||
|
require.Nil(t, m)
|
||||||
|
} else {
|
||||||
|
require.NotNil(t, m)
|
||||||
|
require.Equal(t, tc.expectedMeasurement, m.Name())
|
||||||
|
require.Equal(t, tc.expectedFields, m.Fields())
|
||||||
|
require.Equal(t, tc.expectedTags, m.Tags())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type testInput struct{}
|
||||||
|
|
||||||
|
func (t *testInput) Description() string { return "" }
|
||||||
|
func (t *testInput) SampleConfig() string { return "" }
|
||||||
|
func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil }
|
|
@ -0,0 +1,194 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/internal/buffer"
|
||||||
|
"github.com/influxdata/telegraf/metric"
|
||||||
|
"github.com/influxdata/telegraf/selfstat"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Default size of metrics batch size.
|
||||||
|
DEFAULT_METRIC_BATCH_SIZE = 1000
|
||||||
|
|
||||||
|
// Default number of metrics kept. It should be a multiple of batch size.
|
||||||
|
DEFAULT_METRIC_BUFFER_LIMIT = 10000
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunningOutput contains the output configuration
|
||||||
|
type RunningOutput struct {
|
||||||
|
Name string
|
||||||
|
Output telegraf.Output
|
||||||
|
Config *OutputConfig
|
||||||
|
MetricBufferLimit int
|
||||||
|
MetricBatchSize int
|
||||||
|
|
||||||
|
MetricsFiltered selfstat.Stat
|
||||||
|
MetricsWritten selfstat.Stat
|
||||||
|
BufferSize selfstat.Stat
|
||||||
|
BufferLimit selfstat.Stat
|
||||||
|
WriteTime selfstat.Stat
|
||||||
|
|
||||||
|
metrics *buffer.Buffer
|
||||||
|
failMetrics *buffer.Buffer
|
||||||
|
|
||||||
|
// Guards against concurrent calls to the Output as described in #3009
|
||||||
|
sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRunningOutput(
|
||||||
|
name string,
|
||||||
|
output telegraf.Output,
|
||||||
|
conf *OutputConfig,
|
||||||
|
batchSize int,
|
||||||
|
bufferLimit int,
|
||||||
|
) *RunningOutput {
|
||||||
|
if bufferLimit == 0 {
|
||||||
|
bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT
|
||||||
|
}
|
||||||
|
if batchSize == 0 {
|
||||||
|
batchSize = DEFAULT_METRIC_BATCH_SIZE
|
||||||
|
}
|
||||||
|
ro := &RunningOutput{
|
||||||
|
Name: name,
|
||||||
|
metrics: buffer.NewBuffer(batchSize),
|
||||||
|
failMetrics: buffer.NewBuffer(bufferLimit),
|
||||||
|
Output: output,
|
||||||
|
Config: conf,
|
||||||
|
MetricBufferLimit: bufferLimit,
|
||||||
|
MetricBatchSize: batchSize,
|
||||||
|
MetricsWritten: selfstat.Register(
|
||||||
|
"write",
|
||||||
|
"metrics_written",
|
||||||
|
map[string]string{"output": name},
|
||||||
|
),
|
||||||
|
MetricsFiltered: selfstat.Register(
|
||||||
|
"write",
|
||||||
|
"metrics_filtered",
|
||||||
|
map[string]string{"output": name},
|
||||||
|
),
|
||||||
|
BufferSize: selfstat.Register(
|
||||||
|
"write",
|
||||||
|
"buffer_size",
|
||||||
|
map[string]string{"output": name},
|
||||||
|
),
|
||||||
|
BufferLimit: selfstat.Register(
|
||||||
|
"write",
|
||||||
|
"buffer_limit",
|
||||||
|
map[string]string{"output": name},
|
||||||
|
),
|
||||||
|
WriteTime: selfstat.RegisterTiming(
|
||||||
|
"write",
|
||||||
|
"write_time_ns",
|
||||||
|
map[string]string{"output": name},
|
||||||
|
),
|
||||||
|
}
|
||||||
|
ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
|
||||||
|
return ro
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMetric adds a metric to the output. This function can also write cached
|
||||||
|
// points if FlushBufferWhenFull is true.
|
||||||
|
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Filter any tagexclude/taginclude parameters before adding metric
|
||||||
|
if ro.Config.Filter.IsActive() {
|
||||||
|
// In order to filter out tags, we need to create a new metric, since
|
||||||
|
// metrics are immutable once created.
|
||||||
|
name := m.Name()
|
||||||
|
tags := m.Tags()
|
||||||
|
fields := m.Fields()
|
||||||
|
t := m.Time()
|
||||||
|
if ok := ro.Config.Filter.Apply(name, fields, tags); !ok {
|
||||||
|
ro.MetricsFiltered.Incr(1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// error is not possible if creating from another metric, so ignore.
|
||||||
|
m, _ = metric.New(name, tags, fields, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
ro.metrics.Add(m)
|
||||||
|
if ro.metrics.Len() == ro.MetricBatchSize {
|
||||||
|
batch := ro.metrics.Batch(ro.MetricBatchSize)
|
||||||
|
err := ro.write(batch)
|
||||||
|
if err != nil {
|
||||||
|
ro.failMetrics.Add(batch...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes all cached points to this output.
|
||||||
|
func (ro *RunningOutput) Write() error {
|
||||||
|
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
|
||||||
|
ro.BufferSize.Set(int64(nFails + nMetrics))
|
||||||
|
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
|
||||||
|
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
|
||||||
|
var err error
|
||||||
|
if !ro.failMetrics.IsEmpty() {
|
||||||
|
// how many batches of failed writes we need to write.
|
||||||
|
nBatches := nFails/ro.MetricBatchSize + 1
|
||||||
|
batchSize := ro.MetricBatchSize
|
||||||
|
|
||||||
|
for i := 0; i < nBatches; i++ {
|
||||||
|
// If it's the last batch, only grab the metrics that have not had
|
||||||
|
// a write attempt already (this is primarily to preserve order).
|
||||||
|
if i == nBatches-1 {
|
||||||
|
batchSize = nFails % ro.MetricBatchSize
|
||||||
|
}
|
||||||
|
batch := ro.failMetrics.Batch(batchSize)
|
||||||
|
// If we've already failed previous writes, don't bother trying to
|
||||||
|
// write to this output again. We are not exiting the loop just so
|
||||||
|
// that we can rotate the metrics to preserve order.
|
||||||
|
if err == nil {
|
||||||
|
err = ro.write(batch)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
ro.failMetrics.Add(batch...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
batch := ro.metrics.Batch(ro.MetricBatchSize)
|
||||||
|
// see comment above about not trying to write to an already failed output.
|
||||||
|
// if ro.failMetrics is empty then err will always be nil at this point.
|
||||||
|
if err == nil {
|
||||||
|
err = ro.write(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
ro.failMetrics.Add(batch...)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
|
||||||
|
nMetrics := len(metrics)
|
||||||
|
if nMetrics == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ro.Lock()
|
||||||
|
defer ro.Unlock()
|
||||||
|
start := time.Now()
|
||||||
|
err := ro.Output.Write(metrics)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
if err == nil {
|
||||||
|
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
|
||||||
|
ro.Name, nMetrics, elapsed)
|
||||||
|
ro.MetricsWritten.Incr(int64(nMetrics))
|
||||||
|
ro.WriteTime.Incr(elapsed.Nanoseconds())
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// OutputConfig containing name and filter
|
||||||
|
type OutputConfig struct {
|
||||||
|
Name string
|
||||||
|
Filter Filter
|
||||||
|
}
|
|
@ -4,11 +4,10 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdata/telegraf"
|
"github.com/influxdata/telegraf"
|
||||||
"github.com/influxdata/telegraf/selfstat"
|
|
||||||
"github.com/influxdata/telegraf/testutil"
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
@ -29,14 +28,6 @@ var next5 = []telegraf.Metric{
|
||||||
testutil.TestMetric(101, "metric10"),
|
testutil.TestMetric(101, "metric10"),
|
||||||
}
|
}
|
||||||
|
|
||||||
func reverse(metrics []telegraf.Metric) []telegraf.Metric {
|
|
||||||
result := make([]telegraf.Metric, 0, len(metrics))
|
|
||||||
for i := len(metrics) - 1; i >= 0; i-- {
|
|
||||||
result = append(result, metrics[i])
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Benchmark adding metrics.
|
// Benchmark adding metrics.
|
||||||
func BenchmarkRunningOutputAddWrite(b *testing.B) {
|
func BenchmarkRunningOutputAddWrite(b *testing.B) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
|
@ -84,6 +75,23 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddingNilMetric(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
||||||
|
|
||||||
|
ro.AddMetric(nil)
|
||||||
|
ro.AddMetric(nil)
|
||||||
|
ro.AddMetric(nil)
|
||||||
|
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
}
|
||||||
|
|
||||||
// Test that NameDrop filters ger properly applied.
|
// Test that NameDrop filters ger properly applied.
|
||||||
func TestRunningOutput_DropFilter(t *testing.T) {
|
func TestRunningOutput_DropFilter(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
|
@ -218,60 +226,6 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) {
|
||||||
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
assert.Len(t, m.Metrics()[0].Tags(), 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that measurement name overriding correctly
|
|
||||||
func TestRunningOutput_NameOverride(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
NameOverride: "new_metric_name",
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(testutil.TestMetric(101, "metric1"))
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Equal(t, "new_metric_name", m.Metrics()[0].Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that measurement name prefix is added correctly
|
|
||||||
func TestRunningOutput_NamePrefix(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
NamePrefix: "prefix_",
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(testutil.TestMetric(101, "metric1"))
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Equal(t, "prefix_metric1", m.Metrics()[0].Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that measurement name suffix is added correctly
|
|
||||||
func TestRunningOutput_NameSuffix(t *testing.T) {
|
|
||||||
conf := &OutputConfig{
|
|
||||||
NameSuffix: "_suffix",
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &mockOutput{}
|
|
||||||
ro := NewRunningOutput("test", m, conf, 1000, 10000)
|
|
||||||
|
|
||||||
ro.AddMetric(testutil.TestMetric(101, "metric1"))
|
|
||||||
assert.Len(t, m.Metrics(), 0)
|
|
||||||
|
|
||||||
err := ro.Write()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, m.Metrics(), 1)
|
|
||||||
assert.Equal(t, "metric1_suffix", m.Metrics()[0].Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that we can write metrics with simple default setup.
|
// Test that we can write metrics with simple default setup.
|
||||||
func TestRunningOutputDefault(t *testing.T) {
|
func TestRunningOutputDefault(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
|
@ -294,6 +248,56 @@ func TestRunningOutputDefault(t *testing.T) {
|
||||||
assert.Len(t, m.Metrics(), 10)
|
assert.Len(t, m.Metrics(), 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test that running output doesn't flush until it's full when
|
||||||
|
// FlushBufferWhenFull is set.
|
||||||
|
func TestRunningOutputFlushWhenFull(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 6, 10)
|
||||||
|
|
||||||
|
// Fill buffer to 1 under limit
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// no flush yet
|
||||||
|
assert.Len(t, m.Metrics(), 0)
|
||||||
|
|
||||||
|
// add one more metric
|
||||||
|
ro.AddMetric(next5[0])
|
||||||
|
// now it flushed
|
||||||
|
assert.Len(t, m.Metrics(), 6)
|
||||||
|
|
||||||
|
// add one more metric and write it manually
|
||||||
|
ro.AddMetric(next5[1])
|
||||||
|
err := ro.Write()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Len(t, m.Metrics(), 7)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that running output doesn't flush until it's full when
|
||||||
|
// FlushBufferWhenFull is set, twice.
|
||||||
|
func TestRunningOutputMultiFlushWhenFull(t *testing.T) {
|
||||||
|
conf := &OutputConfig{
|
||||||
|
Filter: Filter{},
|
||||||
|
}
|
||||||
|
|
||||||
|
m := &mockOutput{}
|
||||||
|
ro := NewRunningOutput("test", m, conf, 4, 12)
|
||||||
|
|
||||||
|
// Fill buffer past limit twive
|
||||||
|
for _, metric := range first5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
for _, metric := range next5 {
|
||||||
|
ro.AddMetric(metric)
|
||||||
|
}
|
||||||
|
// flushed twice
|
||||||
|
assert.Len(t, m.Metrics(), 8)
|
||||||
|
}
|
||||||
|
|
||||||
func TestRunningOutputWriteFail(t *testing.T) {
|
func TestRunningOutputWriteFail(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
Filter: Filter{},
|
Filter: Filter{},
|
||||||
|
@ -360,7 +364,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) {
|
||||||
// Verify that 10 metrics were written
|
// Verify that 10 metrics were written
|
||||||
assert.Len(t, m.Metrics(), 10)
|
assert.Len(t, m.Metrics(), 10)
|
||||||
// Verify that they are in order
|
// Verify that they are in order
|
||||||
expected := append(reverse(next5), reverse(first5)...)
|
expected := append(first5, next5...)
|
||||||
assert.Equal(t, expected, m.Metrics())
|
assert.Equal(t, expected, m.Metrics())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -418,17 +422,24 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) {
|
||||||
err = ro.Write()
|
err = ro.Write()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify that 20 metrics were written
|
// Verify that 10 metrics were written
|
||||||
assert.Len(t, m.Metrics(), 20)
|
assert.Len(t, m.Metrics(), 20)
|
||||||
// Verify that they are in order
|
// Verify that they are in order
|
||||||
expected := append(reverse(next5), reverse(first5)...)
|
expected := append(first5, next5...)
|
||||||
expected = append(expected, reverse(next5)...)
|
expected = append(expected, first5...)
|
||||||
expected = append(expected, reverse(first5)...)
|
expected = append(expected, next5...)
|
||||||
assert.Equal(t, expected, m.Metrics())
|
assert.Equal(t, expected, m.Metrics())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that the order of points is preserved when there is a remainder
|
// Verify that the order of points is preserved when there is a remainder
|
||||||
// of points for the batch.
|
// of points for the batch.
|
||||||
|
//
|
||||||
|
// ie, with a batch size of 5:
|
||||||
|
//
|
||||||
|
// 1 2 3 4 5 6 <-- order, failed points
|
||||||
|
// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch)
|
||||||
|
// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch)
|
||||||
|
//
|
||||||
func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
||||||
conf := &OutputConfig{
|
conf := &OutputConfig{
|
||||||
Filter: Filter{},
|
Filter: Filter{},
|
||||||
|
@ -464,54 +475,10 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) {
|
||||||
// Verify that 6 metrics were written
|
// Verify that 6 metrics were written
|
||||||
assert.Len(t, m.Metrics(), 6)
|
assert.Len(t, m.Metrics(), 6)
|
||||||
// Verify that they are in order
|
// Verify that they are in order
|
||||||
expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]}
|
expected := append(first5, next5[0])
|
||||||
assert.Equal(t, expected, m.Metrics())
|
assert.Equal(t, expected, m.Metrics())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalMetrics(t *testing.T) {
|
|
||||||
_ = NewRunningOutput(
|
|
||||||
"test_internal",
|
|
||||||
&mockOutput{},
|
|
||||||
&OutputConfig{
|
|
||||||
Filter: Filter{},
|
|
||||||
Name: "test_name",
|
|
||||||
Alias: "test_alias",
|
|
||||||
},
|
|
||||||
5,
|
|
||||||
10)
|
|
||||||
|
|
||||||
expected := []telegraf.Metric{
|
|
||||||
testutil.MustMetric(
|
|
||||||
"internal_write",
|
|
||||||
map[string]string{
|
|
||||||
"output": "test_name",
|
|
||||||
"alias": "test_alias",
|
|
||||||
},
|
|
||||||
map[string]interface{}{
|
|
||||||
"buffer_limit": 10,
|
|
||||||
"buffer_size": 0,
|
|
||||||
"errors": 0,
|
|
||||||
"metrics_added": 0,
|
|
||||||
"metrics_dropped": 0,
|
|
||||||
"metrics_filtered": 0,
|
|
||||||
"metrics_written": 0,
|
|
||||||
"write_time_ns": 0,
|
|
||||||
},
|
|
||||||
time.Unix(0, 0),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
var actual []telegraf.Metric
|
|
||||||
for _, m := range selfstat.Metrics() {
|
|
||||||
output, _ := m.GetTag("output")
|
|
||||||
if m.Name() == "internal_write" && output == "test_name" {
|
|
||||||
actual = append(actual, m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime())
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockOutput struct {
|
type mockOutput struct {
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
|
@ -0,0 +1,51 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RunningProcessor struct {
|
||||||
|
Name string
|
||||||
|
|
||||||
|
sync.Mutex
|
||||||
|
Processor telegraf.Processor
|
||||||
|
Config *ProcessorConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
type RunningProcessors []*RunningProcessor
|
||||||
|
|
||||||
|
func (rp RunningProcessors) Len() int { return len(rp) }
|
||||||
|
func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] }
|
||||||
|
func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order }
|
||||||
|
|
||||||
|
// FilterConfig containing a name and filter
|
||||||
|
type ProcessorConfig struct {
|
||||||
|
Name string
|
||||||
|
Order int64
|
||||||
|
Filter Filter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
||||||
|
rp.Lock()
|
||||||
|
defer rp.Unlock()
|
||||||
|
|
||||||
|
ret := []telegraf.Metric{}
|
||||||
|
|
||||||
|
for _, metric := range in {
|
||||||
|
if rp.Config.Filter.IsActive() {
|
||||||
|
// check if the filter should be applied to this metric
|
||||||
|
if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok {
|
||||||
|
// this means filter should not be applied
|
||||||
|
ret = append(ret, metric)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// This metric should pass through the filter, so call the filter Apply
|
||||||
|
// function and append results to the output slice.
|
||||||
|
ret = append(ret, rp.Processor.Apply(metric)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
|
@ -0,0 +1,117 @@
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/influxdata/telegraf"
|
||||||
|
"github.com/influxdata/telegraf/testutil"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestProcessor struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *TestProcessor) SampleConfig() string { return "" }
|
||||||
|
func (f *TestProcessor) Description() string { return "" }
|
||||||
|
|
||||||
|
// Apply renames:
|
||||||
|
// "foo" to "fuz"
|
||||||
|
// "bar" to "baz"
|
||||||
|
// And it also drops measurements named "dropme"
|
||||||
|
func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
|
||||||
|
out := make([]telegraf.Metric, 0)
|
||||||
|
for _, m := range in {
|
||||||
|
switch m.Name() {
|
||||||
|
case "foo":
|
||||||
|
out = append(out, testutil.TestMetric(1, "fuz"))
|
||||||
|
case "bar":
|
||||||
|
out = append(out, testutil.TestMetric(1, "baz"))
|
||||||
|
case "dropme":
|
||||||
|
// drop the metric!
|
||||||
|
default:
|
||||||
|
out = append(out, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTestRunningProcessor() *RunningProcessor {
|
||||||
|
out := &RunningProcessor{
|
||||||
|
Name: "test",
|
||||||
|
Processor: &TestProcessor{},
|
||||||
|
Config: &ProcessorConfig{Filter: Filter{}},
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunningProcessor(t *testing.T) {
|
||||||
|
inmetrics := []telegraf.Metric{
|
||||||
|
testutil.TestMetric(1, "foo"),
|
||||||
|
testutil.TestMetric(1, "bar"),
|
||||||
|
testutil.TestMetric(1, "baz"),
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedNames := []string{
|
||||||
|
"fuz",
|
||||||
|
"baz",
|
||||||
|
"baz",
|
||||||
|
}
|
||||||
|
rfp := NewTestRunningProcessor()
|
||||||
|
filteredMetrics := rfp.Apply(inmetrics...)
|
||||||
|
|
||||||
|
actualNames := []string{
|
||||||
|
filteredMetrics[0].Name(),
|
||||||
|
filteredMetrics[1].Name(),
|
||||||
|
filteredMetrics[2].Name(),
|
||||||
|
}
|
||||||
|
assert.Equal(t, expectedNames, actualNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunningProcessor_WithNameDrop(t *testing.T) {
|
||||||
|
inmetrics := []telegraf.Metric{
|
||||||
|
testutil.TestMetric(1, "foo"),
|
||||||
|
testutil.TestMetric(1, "bar"),
|
||||||
|
testutil.TestMetric(1, "baz"),
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedNames := []string{
|
||||||
|
"foo",
|
||||||
|
"baz",
|
||||||
|
"baz",
|
||||||
|
}
|
||||||
|
rfp := NewTestRunningProcessor()
|
||||||
|
|
||||||
|
rfp.Config.Filter.NameDrop = []string{"foo"}
|
||||||
|
assert.NoError(t, rfp.Config.Filter.Compile())
|
||||||
|
|
||||||
|
filteredMetrics := rfp.Apply(inmetrics...)
|
||||||
|
|
||||||
|
actualNames := []string{
|
||||||
|
filteredMetrics[0].Name(),
|
||||||
|
filteredMetrics[1].Name(),
|
||||||
|
filteredMetrics[2].Name(),
|
||||||
|
}
|
||||||
|
assert.Equal(t, expectedNames, actualNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunningProcessor_DroppedMetric(t *testing.T) {
|
||||||
|
inmetrics := []telegraf.Metric{
|
||||||
|
testutil.TestMetric(1, "dropme"),
|
||||||
|
testutil.TestMetric(1, "foo"),
|
||||||
|
testutil.TestMetric(1, "bar"),
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedNames := []string{
|
||||||
|
"fuz",
|
||||||
|
"baz",
|
||||||
|
}
|
||||||
|
rfp := NewTestRunningProcessor()
|
||||||
|
filteredMetrics := rfp.Apply(inmetrics...)
|
||||||
|
|
||||||
|
actualNames := []string{
|
||||||
|
filteredMetrics[0].Name(),
|
||||||
|
filteredMetrics[1].Name(),
|
||||||
|
}
|
||||||
|
assert.Equal(t, expectedNames, actualNames)
|
||||||
|
}
|
|
@ -1,185 +0,0 @@
|
||||||
package rotate
|
|
||||||
|
|
||||||
// Rotating things
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FilePerm defines the permissions that Writer will use for all
|
|
||||||
// the files it creates.
|
|
||||||
const (
|
|
||||||
FilePerm = os.FileMode(0644)
|
|
||||||
DateFormat = "2006-01-02"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileWriter implements the io.Writer interface and writes to the
|
|
||||||
// filename specified.
|
|
||||||
// Will rotate at the specified interval and/or when the current file size exceeds maxSizeInBytes
|
|
||||||
// At rotation time, current file is renamed and a new file is created.
|
|
||||||
// If the number of archives exceeds maxArchives, older files are deleted.
|
|
||||||
type FileWriter struct {
|
|
||||||
filename string
|
|
||||||
filenameRotationTemplate string
|
|
||||||
current *os.File
|
|
||||||
interval time.Duration
|
|
||||||
maxSizeInBytes int64
|
|
||||||
maxArchives int
|
|
||||||
expireTime time.Time
|
|
||||||
bytesWritten int64
|
|
||||||
sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFileWriter creates a new file writer.
|
|
||||||
func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) {
|
|
||||||
if interval == 0 && maxSizeInBytes <= 0 {
|
|
||||||
// No rotation needed so a basic io.Writer will do the trick
|
|
||||||
return openFile(filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
w := &FileWriter{
|
|
||||||
filename: filename,
|
|
||||||
interval: interval,
|
|
||||||
maxSizeInBytes: maxSizeInBytes,
|
|
||||||
maxArchives: maxArchives,
|
|
||||||
filenameRotationTemplate: getFilenameRotationTemplate(filename),
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.openCurrent(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return w, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func openFile(filename string) (*os.File, error) {
|
|
||||||
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFilenameRotationTemplate(filename string) string {
|
|
||||||
// Extract the file extension
|
|
||||||
fileExt := filepath.Ext(filename)
|
|
||||||
// Remove the file extension from the filename (if any)
|
|
||||||
stem := strings.TrimSuffix(filename, fileExt)
|
|
||||||
return stem + ".%s-%s" + fileExt
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes p to the current file, then checks to see if
|
|
||||||
// rotation is necessary.
|
|
||||||
func (w *FileWriter) Write(p []byte) (n int, err error) {
|
|
||||||
w.Lock()
|
|
||||||
defer w.Unlock()
|
|
||||||
if n, err = w.current.Write(p); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
w.bytesWritten += int64(n)
|
|
||||||
|
|
||||||
if err = w.rotateIfNeeded(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the current file. Writer is unusable after this
|
|
||||||
// is called.
|
|
||||||
func (w *FileWriter) Close() (err error) {
|
|
||||||
w.Lock()
|
|
||||||
defer w.Unlock()
|
|
||||||
|
|
||||||
// Rotate before closing
|
|
||||||
if err = w.rotate(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
w.current = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *FileWriter) openCurrent() (err error) {
|
|
||||||
// In case ModTime() fails, we use time.Now()
|
|
||||||
w.expireTime = time.Now().Add(w.interval)
|
|
||||||
w.bytesWritten = 0
|
|
||||||
w.current, err = openFile(w.filename)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Goal here is to rotate old pre-existing files.
|
|
||||||
// For that we use fileInfo.ModTime, instead of time.Now().
|
|
||||||
// Example: telegraf is restarted every 23 hours and
|
|
||||||
// the rotation interval is set to 24 hours.
|
|
||||||
// With time.now() as a reference we'd never rotate the file.
|
|
||||||
if fileInfo, err := w.current.Stat(); err == nil {
|
|
||||||
w.expireTime = fileInfo.ModTime().Add(w.interval)
|
|
||||||
w.bytesWritten = fileInfo.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = w.rotateIfNeeded(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *FileWriter) rotateIfNeeded() error {
|
|
||||||
if (w.interval > 0 && time.Now().After(w.expireTime)) ||
|
|
||||||
(w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) {
|
|
||||||
if err := w.rotate(); err != nil {
|
|
||||||
//Ignore rotation errors and keep the log open
|
|
||||||
fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error())
|
|
||||||
}
|
|
||||||
return w.openCurrent()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *FileWriter) rotate() (err error) {
|
|
||||||
if err = w.current.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use year-month-date for readability, unix time to make the file name unique with second precision
|
|
||||||
now := time.Now()
|
|
||||||
rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10))
|
|
||||||
if err = os.Rename(w.filename, rotatedFilename); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = w.purgeArchivesIfNeeded(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *FileWriter) purgeArchivesIfNeeded() (err error) {
|
|
||||||
if w.maxArchives == -1 {
|
|
||||||
//Skip archiving
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var matches []string
|
|
||||||
if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
//if there are more archives than the configured maximum, then purge older files
|
|
||||||
if len(matches) > w.maxArchives {
|
|
||||||
//sort files alphanumerically to delete older files first
|
|
||||||
sort.Strings(matches)
|
|
||||||
for _, filename := range matches[:len(matches)-w.maxArchives] {
|
|
||||||
if err = os.Remove(filename); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,148 +0,0 @@
|
||||||
package rotate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFileWriter_NoRotation(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationNo")
|
|
||||||
require.NoError(t, err)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
_, err = writer.Write([]byte("Hello World"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = writer.Write([]byte("Hello World 2"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 1, len(files))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_TimeRotation(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationTime")
|
|
||||||
require.NoError(t, err)
|
|
||||||
interval, _ := time.ParseDuration("1s")
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
_, err = writer.Write([]byte("Hello World"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
_, err = writer.Write([]byte("Hello World 2"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 2, len(files))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_ReopenTimeRotation(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationTime")
|
|
||||||
require.NoError(t, err)
|
|
||||||
interval, _ := time.ParseDuration("1s")
|
|
||||||
filePath := filepath.Join(tempDir, "test.log")
|
|
||||||
err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 2, len(files))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_SizeRotation(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationSize")
|
|
||||||
require.NoError(t, err)
|
|
||||||
maxSize := int64(9)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
_, err = writer.Write([]byte("Hello World"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = writer.Write([]byte("World 2"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 2, len(files))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_ReopenSizeRotation(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationSize")
|
|
||||||
require.NoError(t, err)
|
|
||||||
maxSize := int64(12)
|
|
||||||
filePath := filepath.Join(tempDir, "test.log")
|
|
||||||
err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
_, err = writer.Write([]byte("Hello World Again"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 2, len(files))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_DeleteArchives(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationDeleteArchives")
|
|
||||||
require.NoError(t, err)
|
|
||||||
maxSize := int64(5)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { writer.Close(); os.RemoveAll(tempDir) }()
|
|
||||||
|
|
||||||
_, err = writer.Write([]byte("First file"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
// File names include the date with second precision
|
|
||||||
// So, to force rotation with different file names
|
|
||||||
// we need to wait
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
_, err = writer.Write([]byte("Second file"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
_, err = writer.Write([]byte("Third file"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 3, len(files))
|
|
||||||
|
|
||||||
for _, tempFile := range files {
|
|
||||||
var bytes []byte
|
|
||||||
var err error
|
|
||||||
path := filepath.Join(tempDir, tempFile.Name())
|
|
||||||
if bytes, err = ioutil.ReadFile(path); err != nil {
|
|
||||||
t.Error(err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
contents := string(bytes)
|
|
||||||
|
|
||||||
if contents != "" && contents != "Second file" && contents != "Third file" {
|
|
||||||
t.Error("Should have deleted the eldest log file")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileWriter_CloseRotates(t *testing.T) {
|
|
||||||
tempDir, err := ioutil.TempDir("", "RotationClose")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(tempDir)
|
|
||||||
maxSize := int64(9)
|
|
||||||
writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
writer.Close()
|
|
||||||
|
|
||||||
files, _ := ioutil.ReadDir(tempDir)
|
|
||||||
assert.Equal(t, 1, len(files))
|
|
||||||
assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name())
|
|
||||||
}
|
|
|
@ -1,64 +0,0 @@
|
||||||
package syslog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Framing represents the framing technique we expect the messages to come.
|
|
||||||
type Framing int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// OctetCounting indicates the transparent framing technique for syslog transport.
|
|
||||||
OctetCounting Framing = iota
|
|
||||||
// NonTransparent indicates the non-transparent framing technique for syslog transport.
|
|
||||||
NonTransparent
|
|
||||||
)
|
|
||||||
|
|
||||||
func (f Framing) String() string {
|
|
||||||
switch f {
|
|
||||||
case OctetCounting:
|
|
||||||
return "OCTET-COUNTING"
|
|
||||||
case NonTransparent:
|
|
||||||
return "NON-TRANSPARENT"
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalTOML implements ability to unmarshal framing from TOML files.
|
|
||||||
func (f *Framing) UnmarshalTOML(data []byte) (err error) {
|
|
||||||
return f.UnmarshalText(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText implements encoding.TextUnmarshaler
|
|
||||||
func (f *Framing) UnmarshalText(data []byte) (err error) {
|
|
||||||
s := string(data)
|
|
||||||
switch strings.ToUpper(s) {
|
|
||||||
case `OCTET-COUNTING`:
|
|
||||||
fallthrough
|
|
||||||
case `"OCTET-COUNTING"`:
|
|
||||||
fallthrough
|
|
||||||
case `'OCTET-COUNTING'`:
|
|
||||||
*f = OctetCounting
|
|
||||||
return
|
|
||||||
|
|
||||||
case `NON-TRANSPARENT`:
|
|
||||||
fallthrough
|
|
||||||
case `"NON-TRANSPARENT"`:
|
|
||||||
fallthrough
|
|
||||||
case `'NON-TRANSPARENT'`:
|
|
||||||
*f = NonTransparent
|
|
||||||
return
|
|
||||||
}
|
|
||||||
*f = -1
|
|
||||||
return fmt.Errorf("unknown framing")
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText implements encoding.TextMarshaler
|
|
||||||
func (f Framing) MarshalText() ([]byte, error) {
|
|
||||||
s := f.String()
|
|
||||||
if s != "" {
|
|
||||||
return []byte(s), nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("unknown framing")
|
|
||||||
}
|
|
|
@ -1,37 +0,0 @@
|
||||||
package syslog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFraming(t *testing.T) {
|
|
||||||
var f1 Framing
|
|
||||||
f1.UnmarshalTOML([]byte(`"non-transparent"`))
|
|
||||||
assert.Equal(t, NonTransparent, f1)
|
|
||||||
|
|
||||||
var f2 Framing
|
|
||||||
f2.UnmarshalTOML([]byte(`non-transparent`))
|
|
||||||
assert.Equal(t, NonTransparent, f2)
|
|
||||||
|
|
||||||
var f3 Framing
|
|
||||||
f3.UnmarshalTOML([]byte(`'non-transparent'`))
|
|
||||||
assert.Equal(t, NonTransparent, f3)
|
|
||||||
|
|
||||||
var f4 Framing
|
|
||||||
f4.UnmarshalTOML([]byte(`"octet-counting"`))
|
|
||||||
assert.Equal(t, OctetCounting, f4)
|
|
||||||
|
|
||||||
var f5 Framing
|
|
||||||
f5.UnmarshalTOML([]byte(`octet-counting`))
|
|
||||||
assert.Equal(t, OctetCounting, f5)
|
|
||||||
|
|
||||||
var f6 Framing
|
|
||||||
f6.UnmarshalTOML([]byte(`'octet-counting'`))
|
|
||||||
assert.Equal(t, OctetCounting, f6)
|
|
||||||
|
|
||||||
var f7 Framing
|
|
||||||
err := f7.UnmarshalTOML([]byte(`nope`))
|
|
||||||
assert.Equal(t, Framing(-1), f7)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue