renaming plugins -> inputs

Cameron Sparr 2016-01-07 13:39:43 -07:00
parent 30d24a3c1c
commit 9c5db1057d
175 changed files with 606 additions and 572 deletions

View File

@ -1,14 +1,17 @@
## v0.3.0 [unreleased]
### Release Notes
- **breaking change** `plugins` have been renamed to `inputs`. This was done because
`plugins` is too generic, as there are now also "output plugins", and there will likely
be "aggregator plugins" and "filter plugins" in the future. Additionally,
`inputs/` and `outputs/` directories have been placed in the root-level `plugins/`
directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** Plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- **breaking change** `procstat` plugin has `*cpu*` fields renamed to
`*cpu_time*`
- `twemproxy` plugin: `prefix` option removed.
- `procstat` cpu measurements are now prepended with `cpu_time_` instead of
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` instead of
only `cpu_`
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).
@ -18,7 +21,7 @@ aggregated).
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix.
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin name. (`name_override`)
- Added ability to override base plugin measurement name. (`name_override`)
### Bugfixes
@ -62,11 +65,11 @@ functional.
same type can be specified, like this:
```
[[plugins.cpu]]
[[inputs.cpu]]
percpu = false
totalcpu = true
[[plugins.cpu]]
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
@ -93,7 +96,7 @@ same type can be specified, like this:
lists of servers/URLs. 0.2.2 is being released solely to fix that bug
### Bugfixes
- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in plugins.
- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs.
## v0.2.1 [2015-11-16]
@ -154,7 +157,7 @@ be controlled via the `round_interval` and `flush_jitter` config options.
- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many plugins.
of metrics collected and from how many inputs.
- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
@ -187,7 +190,7 @@ will still be backwards compatible if only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for plugins.
flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
filtering to -sample-config command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config

View File

@ -5,9 +5,9 @@
A default Telegraf config file can be generated using the `-sample-config` flag,
like this: `telegraf -sample-config`
To generate a file with specific collectors and outputs, you can use the
`-filter` and `-outputfilter` flags, like this:
`telegraf -sample-config -filter cpu:mem:net:swap -outputfilter influxdb:kafka`
To generate a file with specific inputs and outputs, you can use the
`-input-filter` and `-output-filter` flags, like this:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
## Plugin Configuration
@ -59,7 +59,7 @@ fields which begin with `time_`.
# PLUGINS
[plugins]
[[plugins.cpu]]
[[inputs.cpu]]
percpu = true
totalcpu = false
# filter all fields beginning with 'time_'
@ -70,16 +70,16 @@ fields which begin with `time_`.
```toml
[plugins]
[[plugins.cpu]]
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[plugins.cpu.tagdrop]
[inputs.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]
[[plugins.disk]]
[plugins.disk.tagpass]
[[inputs.disk]]
[inputs.disk.tagpass]
# tagpass conditions are OR, not AND.
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
# then the metric passes
@ -92,13 +92,13 @@ fields which begin with `time_`.
```toml
# Drop all metrics for guest & steal CPU usage
[[plugins.cpu]]
[[inputs.cpu]]
percpu = false
totalcpu = true
drop = ["usage_guest", "usage_steal"]
# Only store inode related metrics for disks
[[plugins.disk]]
[[inputs.disk]]
pass = ["inodes*"]
```
@ -107,7 +107,7 @@ fields which begin with `time_`.
This plugin will emit measurements with the name `cpu_total`
```toml
[[plugins.cpu]]
[[inputs.cpu]]
name_suffix = "_total"
percpu = false
totalcpu = true
@ -116,7 +116,7 @@ This plugin will emit measurements with the name `cpu_total`
This will emit measurements with the name `foobar`
```toml
[[plugins.cpu]]
[[inputs.cpu]]
name_override = "foobar"
percpu = false
totalcpu = true
@ -128,10 +128,10 @@ This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`
```toml
[[plugins.cpu]]
[[inputs.cpu]]
percpu = false
totalcpu = true
[plugins.cpu.tags]
[inputs.cpu.tags]
tag1 = "foo"
tag2 = "bar"
```
@ -142,11 +142,11 @@ Additional plugins (or outputs) of the same type can be specified,
just define more instances in the config file:
```toml
[[plugins.cpu]]
[[inputs.cpu]]
percpu = false
totalcpu = true
[[plugins.cpu]]
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time*"]

View File

@ -5,23 +5,23 @@ which can be found [on our website](http://influxdb.com/community/cla.html)
## Plugins
This section is for developers who want to create new collection plugins.
This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new plugins.
and submit new inputs.
### Plugin Guidelines
* A plugin must conform to the `plugins.Plugin` interface.
* A plugin must conform to the `inputs.Input` interface.
* Each generated metric automatically has the name of the plugin that generated
it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
* Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdb/telegraf/plugins/all/all.go` file.
`github.com/influxdb/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
@ -78,7 +78,7 @@ type Process struct {
PID int
}
func Gather(acc plugins.Accumulator) error {
func Gather(acc inputs.Accumulator) error {
for _, process := range system.Processes() {
tags := map[string]string {
"pid": fmt.Sprintf("%d", process.Pid),
@ -97,7 +97,7 @@ package simple
// simple.go
import "github.com/influxdb/telegraf/plugins"
import "github.com/influxdb/telegraf/plugins/inputs"
type Simple struct {
Ok bool
@ -111,7 +111,7 @@ func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
}
func (s *Simple) Gather(acc plugins.Accumulator) error {
func (s *Simple) Gather(acc inputs.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
} else {
@ -122,14 +122,14 @@ func (s *Simple) Gather(acc plugins.Accumulator) error {
}
func init() {
plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
inputs.Add("simple", func() inputs.Input { return &Simple{} })
}
```
## Service Plugins
This section is for developers who want to create new "service" collection
plugins. A service plugin differs from a regular plugin in that it operates
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.
@ -143,7 +143,7 @@ and `Stop()` methods.
### Service Plugin Guidelines
* Same as the `Plugin` guidelines, except that they must conform to the
`plugins.ServicePlugin` interface.
`inputs.ServiceInput` interface.
### Service Plugin interface
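
A minimal sketch of what the renamed interface looks like, assuming it is simply the `inputs.Input` interface plus the `Start()` and `Stop()` methods mentioned above (see the `inputs` package for the authoritative definition):

```go
type ServiceInput interface {
	// A ServiceInput behaves like a regular Input...
	Input

	// ...but additionally starts a background service before gathering begins
	Start() error

	// and stops that service when Telegraf shuts down
	Stop()
}
```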
@ -169,7 +169,7 @@ similar constructs.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdb/telegraf/outputs/all/all.go` file.
`github.com/influxdb/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
@ -193,7 +193,7 @@ package simpleoutput
// simpleoutput.go
import "github.com/influxdb/telegraf/outputs"
import "github.com/influxdb/telegraf/plugins/outputs"
type Simple struct {
Ok bool
@ -243,7 +243,7 @@ and `Stop()` methods.
### Service Output Guidelines
* Same as the `Output` guidelines, except that they must conform to the
`plugins.ServiceOutput` interface.
`inputs.ServiceOutput` interface.
### Service Output interface
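
As with service plugins, a hedged sketch of the interface, assuming it mirrors the regular `Output` interface plus the start/stop methods:

```go
type ServiceOutput interface {
	// A ServiceOutput behaves like a regular Output...
	Output

	// ...but additionally runs a background service
	Start() error

	// and shuts it down cleanly
	Stop()
}
```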

View File

@ -11,7 +11,7 @@ APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
We'll eagerly accept pull requests for new plugins and will manage the set of
plugins that Telegraf supports. See the
[contributing guide](CONTRIBUTING.md) for instructions on
writing new plugins.
writing new inputs.
## Installation:
@ -92,7 +92,7 @@ if you don't have it already. You also must build with golang version 1.4+.
### How to use it:
* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`.
* Or run `telegraf -sample-config -input-filter cpu:mem -output-filter influxdb > telegraf.conf`
to create a config file with only CPU and memory plugins defined, and InfluxDB
output defined.
* Edit the configuration to match your needs.
@ -100,7 +100,7 @@ output defined.
sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using
the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test`
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
* Run `telegraf -config telegraf.conf -filter system:swap`.
* Run `telegraf -config telegraf.conf -input-filter system:swap`
to run telegraf with only the system & swap plugins defined in the config.
## Telegraf Options

View File

@ -29,12 +29,12 @@ type Accumulator interface {
}
func NewAccumulator(
pluginConfig *config.PluginConfig,
inputConfig *config.InputConfig,
points chan *client.Point,
) Accumulator {
acc := accumulator{}
acc.points = points
acc.pluginConfig = pluginConfig
acc.inputConfig = inputConfig
return &acc
}
@ -47,7 +47,7 @@ type accumulator struct {
debug bool
pluginConfig *config.PluginConfig
inputConfig *config.InputConfig
prefix string
}
@ -73,27 +73,27 @@ func (ac *accumulator) AddFields(
return
}
if !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
if !ac.inputConfig.Filter.ShouldTagsPass(tags) {
return
}
// Override measurement name if set
if len(ac.pluginConfig.NameOverride) != 0 {
measurement = ac.pluginConfig.NameOverride
if len(ac.inputConfig.NameOverride) != 0 {
measurement = ac.inputConfig.NameOverride
}
// Apply measurement prefix and suffix if set
if len(ac.pluginConfig.MeasurementPrefix) != 0 {
measurement = ac.pluginConfig.MeasurementPrefix + measurement
if len(ac.inputConfig.MeasurementPrefix) != 0 {
measurement = ac.inputConfig.MeasurementPrefix + measurement
}
if len(ac.pluginConfig.MeasurementSuffix) != 0 {
measurement = measurement + ac.pluginConfig.MeasurementSuffix
if len(ac.inputConfig.MeasurementSuffix) != 0 {
measurement = measurement + ac.inputConfig.MeasurementSuffix
}
if tags == nil {
tags = make(map[string]string)
}
// Apply plugin-wide tags if set
for k, v := range ac.pluginConfig.Tags {
for k, v := range ac.inputConfig.Tags {
if _, ok := tags[k]; !ok {
tags[k] = v
}
@ -108,8 +108,8 @@ func (ac *accumulator) AddFields(
result := make(map[string]interface{})
for k, v := range fields {
// Filter out any filtered fields
if ac.pluginConfig != nil {
if !ac.pluginConfig.Filter.ShouldPass(k) {
if ac.inputConfig != nil {
if !ac.inputConfig.Filter.ShouldPass(k) {
continue
}
}

View File

@ -10,8 +10,8 @@ import (
"time"
"github.com/influxdb/telegraf/internal/config"
"github.com/influxdb/telegraf/outputs"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"github.com/influxdb/telegraf/plugins/outputs"
"github.com/influxdb/influxdb/client/v2"
)
@ -85,33 +85,33 @@ func (a *Agent) Close() error {
return err
}
// gatherParallel runs the plugins that are using the same reporting interval
// gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent.
func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
var wg sync.WaitGroup
start := time.Now()
counter := 0
for _, plugin := range a.Config.Plugins {
if plugin.Config.Interval != 0 {
for _, input := range a.Config.Inputs {
if input.Config.Interval != 0 {
continue
}
wg.Add(1)
counter++
go func(plugin *config.RunningPlugin) {
go func(input *config.RunningInput) {
defer wg.Done()
acc := NewAccumulator(plugin.Config, pointChan)
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(a.Config.Agent.Debug)
// acc.SetPrefix(plugin.Name + "_")
// acc.SetPrefix(input.Name + "_")
acc.SetDefaultTags(a.Config.Tags)
if err := plugin.Plugin.Gather(acc); err != nil {
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
}(plugin)
}(input)
}
if counter == 0 {
@ -121,36 +121,36 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
wg.Wait()
elapsed := time.Since(start)
log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
a.Config.Agent.Interval, counter, elapsed)
log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n",
a.Config.Agent.Interval.Duration, counter, elapsed)
return nil
}
// gatherSeparate runs the plugins that have been configured with their own
// gatherSeparate runs the inputs that have been configured with their own
// reporting interval.
func (a *Agent) gatherSeparate(
shutdown chan struct{},
plugin *config.RunningPlugin,
input *config.RunningInput,
pointChan chan *client.Point,
) error {
ticker := time.NewTicker(plugin.Config.Interval)
ticker := time.NewTicker(input.Config.Interval)
for {
var outerr error
start := time.Now()
acc := NewAccumulator(plugin.Config, pointChan)
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(a.Config.Agent.Debug)
// acc.SetPrefix(plugin.Name + "_")
// acc.SetPrefix(input.Name + "_")
acc.SetDefaultTags(a.Config.Tags)
if err := plugin.Plugin.Gather(acc); err != nil {
log.Printf("Error in plugin [%s]: %s", plugin.Name, err)
if err := input.Input.Gather(acc); err != nil {
log.Printf("Error in input [%s]: %s", input.Name, err)
}
elapsed := time.Since(start)
log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
plugin.Config.Interval, plugin.Name, elapsed)
input.Config.Interval, input.Name, elapsed)
if outerr != nil {
return outerr
@ -165,7 +165,7 @@ func (a *Agent) gatherSeparate(
}
}
// Test verifies that we can 'Gather' from all plugins with their configured
// Test verifies that we can 'Gather' from all inputs with their configured
// Config struct
func (a *Agent) Test() error {
shutdown := make(chan struct{})
@ -184,27 +184,27 @@ func (a *Agent) Test() error {
}
}()
for _, plugin := range a.Config.Plugins {
acc := NewAccumulator(plugin.Config, pointChan)
for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, pointChan)
acc.SetDebug(true)
// acc.SetPrefix(plugin.Name + "_")
// acc.SetPrefix(input.Name + "_")
fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
if plugin.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", plugin.Config.Interval)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
if err := plugin.Plugin.Gather(acc); err != nil {
if err := input.Input.Gather(acc); err != nil {
return err
}
// Special instructions for some plugins. cpu, for example, needs to be
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch plugin.Name {
switch input.Name {
case "cpu", "mongodb":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name)
if err := plugin.Plugin.Gather(acc); err != nil {
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name)
if err := input.Input.Gather(acc); err != nil {
return err
}
}
@ -332,10 +332,10 @@ func (a *Agent) Run(shutdown chan struct{}) error {
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
"Flush Interval:%s\n",
a.Config.Agent.Interval, a.Config.Agent.Debug,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval)
a.Config.Agent.Interval.Duration, a.Config.Agent.Debug,
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all plugin threads for accumulating points
// channel shared between all input threads for accumulating points
pointChan := make(chan *client.Point, 1000)
// Round collection to nearest interval by sleeping
@ -354,29 +354,29 @@ func (a *Agent) Run(shutdown chan struct{}) error {
}
}()
for _, plugin := range a.Config.Plugins {
for _, input := range a.Config.Inputs {
// Start service of any ServicePlugins
switch p := plugin.Plugin.(type) {
case plugins.ServicePlugin:
switch p := input.Input.(type) {
case inputs.ServiceInput:
if err := p.Start(); err != nil {
log.Printf("Service for plugin %s failed to start, exiting\n%s\n",
plugin.Name, err.Error())
log.Printf("Service for input %s failed to start, exiting\n%s\n",
input.Name, err.Error())
return err
}
defer p.Stop()
}
// Special handling for plugins that have their own collection interval
// Special handling for inputs that have their own collection interval
// configured. Default intervals are handled below with gatherParallel
if plugin.Config.Interval != 0 {
if input.Config.Interval != 0 {
wg.Add(1)
go func(plugin *config.RunningPlugin) {
go func(input *config.RunningInput) {
defer wg.Done()
if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
if err := a.gatherSeparate(shutdown, input, pointChan); err != nil {
log.Printf(err.Error())
}
}(plugin)
}(input)
}
}

View File

@ -8,46 +8,46 @@ import (
"github.com/influxdb/telegraf/internal/config"
// needing to load the plugins
_ "github.com/influxdb/telegraf/plugins/all"
_ "github.com/influxdb/telegraf/plugins/inputs/all"
// needing to load the outputs
_ "github.com/influxdb/telegraf/outputs/all"
_ "github.com/influxdb/telegraf/plugins/outputs/all"
)
func TestAgent_LoadPlugin(t *testing.T) {
c := config.NewConfig()
c.PluginFilters = []string{"mysql"}
c.InputFilters = []string{"mysql"}
err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ := NewAgent(c)
assert.Equal(t, 1, len(a.Config.Plugins))
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.PluginFilters = []string{"foo"}
c.InputFilters = []string{"foo"}
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 0, len(a.Config.Plugins))
assert.Equal(t, 0, len(a.Config.Inputs))
c = config.NewConfig()
c.PluginFilters = []string{"mysql", "foo"}
c.InputFilters = []string{"mysql", "foo"}
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 1, len(a.Config.Plugins))
assert.Equal(t, 1, len(a.Config.Inputs))
c = config.NewConfig()
c.PluginFilters = []string{"mysql", "redis"}
c.InputFilters = []string{"mysql", "redis"}
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Plugins))
assert.Equal(t, 2, len(a.Config.Inputs))
c = config.NewConfig()
c.PluginFilters = []string{"mysql", "foo", "redis", "bar"}
c.InputFilters = []string{"mysql", "foo", "redis", "bar"}
err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml")
assert.NoError(t, err)
a, _ = NewAgent(c)
assert.Equal(t, 2, len(a.Config.Plugins))
assert.Equal(t, 2, len(a.Config.Inputs))
}
func TestAgent_LoadOutput(t *testing.T) {

View File

@ -10,8 +10,6 @@ machine:
- go version
dependencies:
cache_directories:
- "~/telegraf-build/src"
override:
- docker info

View File

@ -10,23 +10,23 @@ import (
"github.com/influxdb/telegraf"
"github.com/influxdb/telegraf/internal/config"
_ "github.com/influxdb/telegraf/outputs/all"
_ "github.com/influxdb/telegraf/plugins/all"
_ "github.com/influxdb/telegraf/plugins/inputs/all"
_ "github.com/influxdb/telegraf/plugins/outputs/all"
)
var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("configdirectory", "",
var fConfigDirectory = flag.String("config-directory", "",
"directory containing additional *.conf files")
var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration")
var fPidfile = flag.String("pidfile", "", "file to write our pid to")
var fPLuginFilters = flag.String("filter", "",
var fInputFilters = flag.String("input-filter", "",
"filter the plugins to enable, separator is :")
var fOutputFilters = flag.String("outputfilter", "",
var fOutputFilters = flag.String("output-filter", "",
"filter the outputs to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
@ -35,13 +35,53 @@ var fUsage = flag.String("usage", "",
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
const usage = `Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB
Usage:
telegraf <flags>
The flags are:
-config <file> configuration file to load
-test gather metrics once, print them to stdout, and exit
-sample-config print out full sample configuration to stdout
-config-directory directory containing additional *.conf files
-input-filter filter the input plugins to enable, separator is :
-output-filter filter the output plugins to enable, separator is :
-usage print usage for a plugin, ie, 'telegraf -usage mysql'
-version print the version to stdout
Examples:
# generate a telegraf config file:
telegraf -sample-config > telegraf.conf
# generate a telegraf config file with only cpu input and influxdb output enabled
telegraf -sample-config -input-filter cpu -output-filter influxdb
# run a single telegraf collection, outputting metrics to stdout
telegraf -config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf -config telegraf.conf
# run telegraf, enabling only the cpu and memory inputs and influxdb output
telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb
`
func main() {
flag.Usage = usageExit
flag.Parse()
var pluginFilters []string
if *fPLuginFilters != "" {
pluginsFilter := strings.TrimSpace(*fPLuginFilters)
pluginFilters = strings.Split(":"+pluginsFilter+":", ":")
if flag.NFlag() == 0 {
usageExit()
}
var inputFilters []string
if *fInputFilters != "" {
inputFilter := strings.TrimSpace(*fInputFilters)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
var outputFilters []string
@ -57,12 +97,12 @@ func main() {
}
if *fSampleConfig {
config.PrintSampleConfig(pluginFilters, outputFilters)
config.PrintSampleConfig(inputFilters, outputFilters)
return
}
if *fUsage != "" {
if err := config.PrintPluginConfig(*fUsage); err != nil {
if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2)
}
@ -78,7 +118,7 @@ func main() {
if *fConfig != "" {
c = config.NewConfig()
c.OutputFilters = outputFilters
c.PluginFilters = pluginFilters
c.InputFilters = inputFilters
err = c.LoadConfig(*fConfig)
if err != nil {
log.Fatal(err)
@ -98,7 +138,7 @@ func main() {
if len(c.Outputs) == 0 {
log.Fatalf("Error: no outputs found, did you provide a valid config file?")
}
if len(c.Plugins) == 0 {
if len(c.Inputs) == 0 {
log.Fatalf("Error: no plugins found, did you provide a valid config file?")
}
@ -134,7 +174,7 @@ func main() {
log.Printf("Starting Telegraf (version %s)\n", Version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " "))
log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
@ -150,3 +190,8 @@ func main() {
ag.Run(shutdown)
}
func usageExit() {
fmt.Println(usage)
os.Exit(0)
}

View File

@ -1,7 +1,7 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# declared inputs.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
@ -76,13 +76,13 @@
###############################################################################
# PLUGINS #
# INPUTS #
###############################################################################
[plugins]
[inputs]
# Read metrics about cpu usage
[[plugins.cpu]]
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
@ -91,13 +91,13 @@
drop = ["cpu_time"]
# Read metrics about disk usage by mount point
[[plugins.disk]]
[[inputs.disk]]
# By default, telegraf gather stats for all mountpoints.
# Setting mountpoints will restrict the stats to the specified mountpoints.
# Mountpoints=["/"]
# Read metrics about disk IO by device
[[plugins.diskio]]
[[inputs.diskio]]
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
@ -106,18 +106,18 @@
# SkipSerialNumber = true
# Read metrics about memory usage
[[plugins.mem]]
[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
[[plugins.swap]]
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[plugins.system]]
[[inputs.system]]
# no configuration
###############################################################################
# SERVICE PLUGINS #
# SERVICE INPUTS #
###############################################################################

View File

@ -11,8 +11,8 @@ import (
"time"
"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/outputs"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"github.com/influxdb/telegraf/plugins/outputs"
"github.com/naoina/toml"
"github.com/naoina/toml/ast"
@ -25,11 +25,11 @@ import (
// specified
type Config struct {
Tags map[string]string
PluginFilters []string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Plugins []*RunningPlugin
Inputs []*RunningInput
Outputs []*RunningOutput
}
@ -45,9 +45,9 @@ func NewConfig() *Config {
},
Tags: make(map[string]string),
Plugins: make([]*RunningPlugin, 0),
Inputs: make([]*RunningInput, 0),
Outputs: make([]*RunningOutput, 0),
PluginFilters: make([]string, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
@ -93,10 +93,10 @@ type RunningOutput struct {
Config *OutputConfig
}
type RunningPlugin struct {
type RunningInput struct {
Name string
Plugin plugins.Plugin
Config *PluginConfig
Input inputs.Input
Config *InputConfig
}
// Filter containing drop/pass and tagdrop/tagpass rules
@ -110,8 +110,8 @@ type Filter struct {
IsActive bool
}
// PluginConfig containing a name, interval, and filter
type PluginConfig struct {
// InputConfig containing a name, interval, and filter
type InputConfig struct {
Name string
NameOverride string
MeasurementPrefix string
@ -204,16 +204,16 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool {
return true
}
// Plugins returns a list of strings of the configured plugins.
func (c *Config) PluginNames() []string {
// Inputs returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, plugin := range c.Plugins {
name = append(name, plugin.Name)
for _, input := range c.Inputs {
name = append(name, input.Name)
}
return name
}
// Outputs returns a list of strings of the configured plugins.
// Outputs returns a list of strings of the configured inputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
@ -239,7 +239,7 @@ func (c *Config) ListTags() string {
var header = `# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# declared inputs.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
@ -263,7 +263,7 @@ var header = `# Telegraf configuration
# Configuration for telegraf agent
[agent]
# Default data collection interval for all plugins
# Default data collection interval for all inputs
interval = "10s"
# Rounds collection interval to 'interval'
# ie, if interval="10s" then always collect on :00, :10, :20, etc.
@ -293,16 +293,16 @@ var header = `# Telegraf configuration
var pluginHeader = `
###############################################################################
# PLUGINS #
# INPUTS #
###############################################################################
[plugins]
[inputs]
`
var servicePluginHeader = `
var serviceInputHeader = `
###############################################################################
# SERVICE PLUGINS #
# SERVICE INPUTS #
###############################################################################
`
@ -326,35 +326,35 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
printConfig(oname, output, "outputs")
}
// Filter plugins
// Filter inputs
var pnames []string
for pname := range plugins.Plugins {
for pname := range inputs.Inputs {
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Plugins
// Print Inputs
fmt.Printf(pluginHeader)
servPlugins := make(map[string]plugins.ServicePlugin)
servInputs := make(map[string]inputs.ServiceInput)
for _, pname := range pnames {
creator := plugins.Plugins[pname]
plugin := creator()
creator := inputs.Inputs[pname]
input := creator()
switch p := plugin.(type) {
case plugins.ServicePlugin:
servPlugins[pname] = p
switch p := input.(type) {
case inputs.ServiceInput:
servInputs[pname] = p
continue
}
printConfig(pname, plugin, "plugins")
printConfig(pname, input, "inputs")
}
// Print Service Plugins
fmt.Printf(servicePluginHeader)
for name, plugin := range servPlugins {
printConfig(name, plugin, "plugins")
// Print Service Inputs
fmt.Printf(serviceInputHeader)
for name, input := range servInputs {
printConfig(name, input, "inputs")
}
}
@ -382,12 +382,12 @@ func sliceContains(name string, list []string) bool {
return false
}
// PrintPluginConfig prints the config usage of a single plugin.
func PrintPluginConfig(name string) error {
if creator, ok := plugins.Plugins[name]; ok {
printConfig(name, creator(), "plugins")
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs")
} else {
return errors.New(fmt.Sprintf("Plugin %s not found", name))
return errors.New(fmt.Sprintf("Input %s not found", name))
}
return nil
}
@ -453,33 +453,15 @@ func (c *Config) LoadConfig(path string) error {
return err
}
case "outputs":
for outputName, outputVal := range subTable.Fields {
switch outputSubTable := outputVal.(type) {
case *ast.Table:
if err = c.addOutput(outputName, outputSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range outputSubTable {
if err = c.addOutput(outputName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
outputName)
}
}
case "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case *ast.Table:
if err = c.addPlugin(pluginName, pluginSubTable); err != nil {
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addPlugin(pluginName, t); err != nil {
if err = c.addOutput(pluginName, t); err != nil {
return err
}
}
@ -488,10 +470,28 @@ func (c *Config) LoadConfig(path string) error {
pluginName)
}
}
// Assume it's a plugin for legacy config file support if no other
case "inputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return err
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return err
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
}
// Assume it's an input for legacy config file support if no other
// identifiers are present
default:
if err = c.addPlugin(name, subTable); err != nil {
if err = c.addInput(name, subTable); err != nil {
return err
}
}
@ -527,41 +527,41 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
return nil
}
func (c *Config) addPlugin(name string, table *ast.Table) error {
if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io plugin to diskio
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := plugins.Plugins[name]
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested plugin: %s", name)
return fmt.Errorf("Undefined but requested input: %s", name)
}
plugin := creator()
input := creator()
pluginConfig, err := buildPlugin(name, table)
pluginConfig, err := buildInput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, plugin); err != nil {
if err := toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := &RunningPlugin{
rp := &RunningInput{
Name: name,
Plugin: plugin,
Input: input,
Config: pluginConfig,
}
c.Plugins = append(c.Plugins, rp)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildFilter builds a Filter (tagpass/tagdrop/pass/drop) to
// be inserted into the OutputConfig/PluginConfig to be used for prefix
// be inserted into the OutputConfig/InputConfig to be used for prefix
// filtering on tags and measurements
func buildFilter(tbl *ast.Table) Filter {
f := Filter{}
@ -637,11 +637,11 @@ func buildFilter(tbl *ast.Table) Filter {
return f
}
// buildPlugin parses plugin specific items from the ast.Table,
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// PluginConfig to be inserted into RunningPlugin
func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
cp := &PluginConfig{Name: name}
// InputConfig to be inserted into RunningInput
func buildInput(name string, tbl *ast.Table) (*InputConfig, error) {
cp := &InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
@ -683,7 +683,7 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("Could not parse tags for plugin %s\n", name)
log.Printf("Could not parse tags for input %s\n", name)
}
}
}
@ -698,7 +698,7 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
}
// buildOutput parses output specific items from the ast.Table, builds the filter and returns an
// OutputConfig to be inserted into RunningPlugin
// OutputConfig to be inserted into RunningInput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
oc := &OutputConfig{

View File

@ -4,21 +4,21 @@ import (
"testing"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/exec"
"github.com/influxdb/telegraf/plugins/memcached"
"github.com/influxdb/telegraf/plugins/procstat"
"github.com/influxdb/telegraf/plugins/inputs"
"github.com/influxdb/telegraf/plugins/inputs/exec"
"github.com/influxdb/telegraf/plugins/inputs/memcached"
"github.com/influxdb/telegraf/plugins/inputs/procstat"
"github.com/stretchr/testify/assert"
)
func TestConfig_LoadSinglePlugin(t *testing.T) {
func TestConfig_LoadSingleInput(t *testing.T) {
c := NewConfig()
c.LoadConfig("./testdata/single_plugin.toml")
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &PluginConfig{
mConfig := &InputConfig{
Name: "memcached",
Filter: Filter{
Drop: []string{"other", "stuff"},
@ -41,9 +41,9 @@ func TestConfig_LoadSinglePlugin(t *testing.T) {
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Plugins[0].Plugin,
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}
@ -58,10 +58,10 @@ func TestConfig_LoadDirectory(t *testing.T) {
t.Error(err)
}
memcached := plugins.Plugins["memcached"]().(*memcached.Memcached)
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"localhost"}
mConfig := &PluginConfig{
mConfig := &InputConfig{
Name: "memcached",
Filter: Filter{
Drop: []string{"other", "stuff"},
@ -84,36 +84,38 @@ func TestConfig_LoadDirectory(t *testing.T) {
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Plugins[0].Plugin,
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[0].Config,
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
ex := plugins.Plugins["exec"]().(*exec.Exec)
ex := inputs.Inputs["exec"]().(*exec.Exec)
ex.Command = "/usr/bin/myothercollector --foo=bar"
ex.Name = "myothercollector"
eConfig := &PluginConfig{Name: "exec"}
eConfig := &InputConfig{
Name: "exec",
MeasurementSuffix: "_myothercollector",
}
eConfig.Tags = make(map[string]string)
assert.Equal(t, ex, c.Plugins[1].Plugin,
assert.Equal(t, ex, c.Inputs[1].Input,
"Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.Plugins[1].Config,
assert.Equal(t, eConfig, c.Inputs[1].Config,
"Merged Testdata did not produce correct exec metadata.")
memcached.Servers = []string{"192.168.1.1"}
assert.Equal(t, memcached, c.Plugins[2].Plugin,
assert.Equal(t, memcached, c.Inputs[2].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Plugins[2].Config,
assert.Equal(t, mConfig, c.Inputs[2].Config,
"Testdata did not produce correct memcached metadata.")
pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
pstat := inputs.Inputs["procstat"]().(*procstat.Procstat)
pstat.PidFile = "/var/run/grafana-server.pid"
pConfig := &PluginConfig{Name: "procstat"}
pConfig := &InputConfig{Name: "procstat"}
pConfig.Tags = make(map[string]string)
assert.Equal(t, pstat, c.Plugins[3].Plugin,
assert.Equal(t, pstat, c.Inputs[3].Input,
"Merged Testdata did not produce a correct procstat struct.")
assert.Equal(t, pConfig, c.Plugins[3].Config,
assert.Equal(t, pConfig, c.Inputs[3].Config,
"Merged Testdata did not produce correct procstat metadata.")
}

View File

@ -1,9 +1,9 @@
[[plugins.memcached]]
[[inputs.memcached]]
servers = ["localhost"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

View File

@ -1,6 +1,4 @@
[[plugins.exec]]
[[inputs.exec]]
# the command to run
command = "/usr/bin/myothercollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "myothercollector"
name_suffix = "_myothercollector"

View File

@ -1,9 +1,9 @@
[[plugins.memcached]]
[[inputs.memcached]]
servers = ["192.168.1.1"]
pass = ["some", "strings"]
drop = ["other", "stuff"]
interval = "5s"
[plugins.memcached.tagpass]
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[plugins.memcached.tagdrop]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

View File

@ -1,2 +1,2 @@
[[plugins.procstat]]
[[inputs.procstat]]
pid_file = "/var/run/grafana-server.pid"

View File

@ -1,7 +1,7 @@
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared plugins.
# declared inputs.
# Even if a plugin has no configuration, it must be declared in here
# to be active. Declaring a plugin means just specifying the name
@ -70,15 +70,15 @@
# PLUGINS #
###############################################################################
[plugins]
[inputs]
# Read Apache status information (mod_status)
[[plugins.apache]]
[[inputs.apache]]
# An array of Apache status URI to gather stats.
urls = ["http://localhost/server-status?auto"]
# Read metrics about cpu usage
[[plugins.cpu]]
[[inputs.cpu]]
# Whether to report per-cpu stats or not
percpu = true
# Whether to report total system cpu stats or not
@ -87,11 +87,11 @@
drop = ["cpu_time"]
# Read metrics about disk usage by mount point
[[plugins.diskio]]
[[inputs.diskio]]
# no configuration
# Read metrics from one or many disque servers
[[plugins.disque]]
[[inputs.disque]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
@ -100,7 +100,7 @@
servers = ["localhost"]
# Read stats from one or more Elasticsearch servers or clusters
[[plugins.elasticsearch]]
[[inputs.elasticsearch]]
# specify a list of one or more Elasticsearch servers
servers = ["http://localhost:9200"]
@ -109,15 +109,13 @@
local = true
# Read flattened metrics from one or more commands that output JSON to stdout
[[plugins.exec]]
[[inputs.exec]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "mycollector"
name_suffix = "_mycollector"
# Read metrics of haproxy, via socket or csv stats page
[[plugins.haproxy]]
[[inputs.haproxy]]
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.10.3.33:1936, etc.
#
@ -127,7 +125,7 @@
# servers = ["socket:/run/haproxy/admin.sock"]
# Read flattened metrics from one or more JSON HTTP endpoints
[[plugins.httpjson]]
[[inputs.httpjson]]
# a name for the service being polled
name = "webserver_stats"
@ -146,11 +144,11 @@
threshold = "0.75"
# Read metrics about disk IO by device
[[plugins.diskio]]
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka topic
[[plugins.kafka_consumer]]
[[inputs.kafka_consumer]]
# topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
@ -163,7 +161,7 @@
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[plugins.leofs]]
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.
# Specify an ip or hostname with port. ie 127.0.0.1:4020
#
@ -171,7 +169,7 @@
servers = ["127.0.0.1:4021"]
# Read metrics from local Lustre service on OST, MDS
[[plugins.lustre2]]
[[inputs.lustre2]]
# An array of /proc globs to search for Lustre stats
# If not specified, the default will work on Lustre 2.5.x
#
@ -179,11 +177,11 @@
# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]
# Read metrics about memory usage
[[plugins.mem]]
[[inputs.mem]]
# no configuration
# Read metrics from one or many memcached servers
[[plugins.memcached]]
[[inputs.memcached]]
# An array of address to gather stats about. Specify an ip on hostname
# with optional port. ie localhost, 10.0.0.1:11211, etc.
#
@ -191,7 +189,7 @@
servers = ["localhost"]
# Read metrics from one or many MongoDB servers
[[plugins.mongodb]]
[[inputs.mongodb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017,
# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc.
@ -200,7 +198,7 @@
servers = ["127.0.0.1:27017"]
# Read metrics from one or many mysql servers
[[plugins.mysql]]
[[inputs.mysql]]
# specify servers via a url matching:
# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# e.g.
@ -211,7 +209,7 @@
servers = ["localhost"]
# Read metrics about network interface usage
[[plugins.net]]
[[inputs.net]]
# By default, telegraf gathers stats from any up interface (excluding loopback)
# Setting interfaces will tell it to gather these explicit interfaces,
# regardless of status.
@ -219,12 +217,12 @@
# interfaces = ["eth0", ... ]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[plugins.nginx]]
[[inputs.nginx]]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/status"]
# Ping given url(s) and return statistics
[[plugins.ping]]
[[inputs.ping]]
# urls to ping
urls = ["www.google.com"] # required
# number of pings to send (ping -c <COUNT>)
@ -237,7 +235,7 @@
interface = ""
# Read metrics from one or many postgresql servers
[[plugins.postgresql]]
[[inputs.postgresql]]
# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
@ -264,12 +262,12 @@
# address = "influx@remoteserver"
# Read metrics from one or many prometheus clients
[[plugins.prometheus]]
[[inputs.prometheus]]
# An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"]
# Read metrics from one or many RabbitMQ servers via the management API
[[plugins.rabbitmq]]
[[inputs.rabbitmq]]
# Specify servers via an array of tables
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
@ -281,7 +279,7 @@
# nodes = ["rabbit@node1", "rabbit@node2"]
# Read metrics from one or many redis servers
[[plugins.redis]]
[[inputs.redis]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie redis://localhost, redis://10.10.3.33:18832,
# 10.0.0.1:10000, etc.
@ -290,7 +288,7 @@
servers = ["localhost"]
# Read metrics from one or many RethinkDB servers
[[plugins.rethinkdb]]
[[inputs.rethinkdb]]
# An array of URI to gather stats about. Specify an ip or hostname
# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105,
# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
@ -299,9 +297,9 @@
servers = ["127.0.0.1:28015"]
# Read metrics about swap memory usage
[[plugins.swap]]
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[plugins.system]]
[[inputs.system]]
# no configuration

View File

@ -1,16 +0,0 @@
package all
import (
_ "github.com/influxdb/telegraf/outputs/amon"
_ "github.com/influxdb/telegraf/outputs/amqp"
_ "github.com/influxdb/telegraf/outputs/datadog"
_ "github.com/influxdb/telegraf/outputs/influxdb"
_ "github.com/influxdb/telegraf/outputs/kafka"
_ "github.com/influxdb/telegraf/outputs/kinesis"
_ "github.com/influxdb/telegraf/outputs/librato"
_ "github.com/influxdb/telegraf/outputs/mqtt"
_ "github.com/influxdb/telegraf/outputs/nsq"
_ "github.com/influxdb/telegraf/outputs/opentsdb"
_ "github.com/influxdb/telegraf/outputs/prometheus_client"
_ "github.com/influxdb/telegraf/outputs/riemann"
)

View File

@ -1,37 +0,0 @@
package all
import (
_ "github.com/influxdb/telegraf/plugins/aerospike"
_ "github.com/influxdb/telegraf/plugins/apache"
_ "github.com/influxdb/telegraf/plugins/bcache"
_ "github.com/influxdb/telegraf/plugins/disque"
_ "github.com/influxdb/telegraf/plugins/elasticsearch"
_ "github.com/influxdb/telegraf/plugins/exec"
_ "github.com/influxdb/telegraf/plugins/haproxy"
_ "github.com/influxdb/telegraf/plugins/httpjson"
_ "github.com/influxdb/telegraf/plugins/influxdb"
_ "github.com/influxdb/telegraf/plugins/jolokia"
_ "github.com/influxdb/telegraf/plugins/kafka_consumer"
_ "github.com/influxdb/telegraf/plugins/leofs"
_ "github.com/influxdb/telegraf/plugins/lustre2"
_ "github.com/influxdb/telegraf/plugins/mailchimp"
_ "github.com/influxdb/telegraf/plugins/memcached"
_ "github.com/influxdb/telegraf/plugins/mongodb"
_ "github.com/influxdb/telegraf/plugins/mysql"
_ "github.com/influxdb/telegraf/plugins/nginx"
_ "github.com/influxdb/telegraf/plugins/phpfpm"
_ "github.com/influxdb/telegraf/plugins/ping"
_ "github.com/influxdb/telegraf/plugins/postgresql"
_ "github.com/influxdb/telegraf/plugins/procstat"
_ "github.com/influxdb/telegraf/plugins/prometheus"
_ "github.com/influxdb/telegraf/plugins/puppetagent"
_ "github.com/influxdb/telegraf/plugins/rabbitmq"
_ "github.com/influxdb/telegraf/plugins/redis"
_ "github.com/influxdb/telegraf/plugins/rethinkdb"
_ "github.com/influxdb/telegraf/plugins/statsd"
_ "github.com/influxdb/telegraf/plugins/system"
_ "github.com/influxdb/telegraf/plugins/trig"
_ "github.com/influxdb/telegraf/plugins/twemproxy"
_ "github.com/influxdb/telegraf/plugins/zfs"
_ "github.com/influxdb/telegraf/plugins/zookeeper"
)

View File

@ -4,7 +4,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"net"
"strconv"
"strings"
@ -119,7 +119,7 @@ func (a *Aerospike) Description() string {
return "Read stats from an aerospike server"
}
func (a *Aerospike) Gather(acc plugins.Accumulator) error {
func (a *Aerospike) Gather(acc inputs.Accumulator) error {
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
@ -140,7 +140,7 @@ func (a *Aerospike) Gather(acc plugins.Accumulator) error {
return outerr
}
func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error {
func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error {
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
if err != nil {
return fmt.Errorf("Aerospike info failed: %s", err)
@ -249,7 +249,7 @@ func get(key []byte, host string) (map[string]string, error) {
func readAerospikeStats(
stats map[string]string,
acc plugins.Accumulator,
acc inputs.Accumulator,
host string,
namespace string,
) {
@ -336,7 +336,7 @@ func msgLenFromBytes(buf [6]byte) int64 {
}
func init() {
plugins.Add("aerospike", func() plugins.Plugin {
inputs.Add("aerospike", func() inputs.Input {
return &Aerospike{}
})
}

plugins/inputs/all/all.go (new file, 37 additions)
View File

@ -0,0 +1,37 @@
package all
import (
_ "github.com/influxdb/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdb/telegraf/plugins/inputs/apache"
_ "github.com/influxdb/telegraf/plugins/inputs/bcache"
_ "github.com/influxdb/telegraf/plugins/inputs/disque"
_ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdb/telegraf/plugins/inputs/exec"
_ "github.com/influxdb/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdb/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdb/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdb/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdb/telegraf/plugins/inputs/leofs"
_ "github.com/influxdb/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdb/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdb/telegraf/plugins/inputs/memcached"
_ "github.com/influxdb/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdb/telegraf/plugins/inputs/mysql"
_ "github.com/influxdb/telegraf/plugins/inputs/nginx"
_ "github.com/influxdb/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdb/telegraf/plugins/inputs/ping"
_ "github.com/influxdb/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdb/telegraf/plugins/inputs/procstat"
_ "github.com/influxdb/telegraf/plugins/inputs/prometheus"
_ "github.com/influxdb/telegraf/plugins/inputs/puppetagent"
_ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq"
_ "github.com/influxdb/telegraf/plugins/inputs/redis"
_ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdb/telegraf/plugins/inputs/statsd"
_ "github.com/influxdb/telegraf/plugins/inputs/system"
_ "github.com/influxdb/telegraf/plugins/inputs/trig"
_ "github.com/influxdb/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdb/telegraf/plugins/inputs/zfs"
_ "github.com/influxdb/telegraf/plugins/inputs/zookeeper"
)

View File

@ -11,7 +11,7 @@ import (
"sync"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Apache struct {
@ -31,7 +31,7 @@ func (n *Apache) Description() string {
return "Read Apache status information (mod_status)"
}
func (n *Apache) Gather(acc plugins.Accumulator) error {
func (n *Apache) Gather(acc inputs.Accumulator) error {
var wg sync.WaitGroup
var outerr error
@ -59,7 +59,7 @@ var tr = &http.Transport{
var client = &http.Client{Transport: tr}
func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
resp, err := client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@ -164,7 +164,7 @@ func getTags(addr *url.URL) map[string]string {
}
func init() {
plugins.Add("apache", func() plugins.Plugin {
inputs.Add("apache", func() inputs.Input {
return &Apache{}
})
}

View File

@ -70,7 +70,7 @@ Using this configuration:
When run with:
```
./telegraf -config telegraf.conf -filter bcache -test
./telegraf -config telegraf.conf -input-filter bcache -test
```
It produces:

View File

@ -8,7 +8,7 @@ import (
"strconv"
"strings"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Bcache struct {
@ -69,7 +69,7 @@ func prettyToBytes(v string) uint64 {
return uint64(result)
}
func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error {
tags := getTags(bdev)
metrics, err := filepath.Glob(bdev + "/stats_total/*")
if len(metrics) < 0 {
@ -104,7 +104,7 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
return nil
}
func (b *Bcache) Gather(acc plugins.Accumulator) error {
func (b *Bcache) Gather(acc inputs.Accumulator) error {
bcacheDevsChecked := make(map[string]bool)
var restrictDevs bool
if len(b.BcacheDevs) != 0 {
@ -135,7 +135,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error {
}
func init() {
plugins.Add("bcache", func() plugins.Plugin {
inputs.Add("bcache", func() inputs.Input {
return &Bcache{}
})
}

View File

@ -10,7 +10,7 @@ import (
"strings"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Disque struct {
@ -61,7 +61,7 @@ var ErrProtocolError = errors.New("disque protocol error")
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *Disque) Gather(acc plugins.Accumulator) error {
func (g *Disque) Gather(acc inputs.Accumulator) error {
if len(g.Servers) == 0 {
url := &url.URL{
Host: ":7711",
@ -98,7 +98,7 @@ func (g *Disque) Gather(acc plugins.Accumulator) error {
const defaultPort = "7711"
func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
if g.c == nil {
_, _, err := net.SplitHostPort(addr.Host)
@ -198,7 +198,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
}
func init() {
plugins.Add("disque", func() plugins.Plugin {
inputs.Add("disque", func() inputs.Input {
return &Disque{}
})
}

View File

@ -7,7 +7,7 @@ import (
"time"
"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
const statsPath = "/_nodes/stats"
@ -92,7 +92,7 @@ func (e *Elasticsearch) Description() string {
// Gather reads the stats from Elasticsearch and writes it to the
// Accumulator.
func (e *Elasticsearch) Gather(acc plugins.Accumulator) error {
func (e *Elasticsearch) Gather(acc inputs.Accumulator) error {
for _, serv := range e.Servers {
var url string
if e.Local {
@ -110,7 +110,7 @@ func (e *Elasticsearch) Gather(acc plugins.Accumulator) error {
return nil
}
func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) error {
func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*node `json:"nodes"`
@ -155,7 +155,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err
return nil
}
func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error {
func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error {
clusterStats := &clusterHealth{}
if err := e.gatherData(url, clusterStats); err != nil {
return err
@ -220,7 +220,7 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
}
func init() {
plugins.Add("elasticsearch", func() plugins.Plugin {
inputs.Add("elasticsearch", func() inputs.Input {
return NewElasticsearch()
})
}

View File

@ -9,20 +9,19 @@ import (
"github.com/gonuts/go-shellquote"
"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
const sampleConfig = `
# the command to run
command = "/usr/bin/mycollector --foo=bar"
# name of the command (used as a prefix for measurements)
name = "mycollector"
# measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
`
type Exec struct {
Command string
Name string
runner Runner
}
@ -62,7 +61,7 @@ func (e *Exec) Description() string {
return "Read flattened metrics from one or more commands that output JSON to stdout"
}
func (e *Exec) Gather(acc plugins.Accumulator) error {
func (e *Exec) Gather(acc inputs.Accumulator) error {
out, err := e.runner.Run(e)
if err != nil {
return err
@ -81,18 +80,12 @@ func (e *Exec) Gather(acc plugins.Accumulator) error {
return err
}
var msrmnt_name string
if e.Name == "" {
msrmnt_name = "exec"
} else {
msrmnt_name = "exec_" + e.Name
}
acc.AddFields(msrmnt_name, f.Fields, nil)
acc.AddFields("exec", f.Fields, nil)
return nil
}
func init() {
plugins.Add("exec", func() plugins.Plugin {
inputs.Add("exec", func() inputs.Input {
return NewExec()
})
}

View File

@ -54,7 +54,6 @@ func TestExec(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(validJson), nil),
Command: "testcommand arg1",
Name: "mycollector",
}
var acc testutil.Accumulator
@ -68,14 +67,13 @@ func TestExec(t *testing.T) {
"cpu_free": float64(32),
"percent": float64(0.81),
}
acc.AssertContainsFields(t, "exec_mycollector", fields)
acc.AssertContainsFields(t, "exec", fields)
}
func TestExecMalformed(t *testing.T) {
e := &Exec{
runner: newRunnerMock([]byte(malformedJson), nil),
Command: "badcommand arg1",
Name: "mycollector",
}
var acc testutil.Accumulator
@ -88,7 +86,6 @@ func TestCommandError(t *testing.T) {
e := &Exec{
runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")),
Command: "badcommand",
Name: "mycollector",
}
var acc testutil.Accumulator
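For illustration only, a minimal sketch of the test pattern these hunks follow after the rename. The `fakeInput` type, its measurement name, and its field values are hypothetical; only `testutil.Accumulator`, `AssertContainsFields`, and the `inputs.Accumulator` parameter come from the diff above:

```go
package example

import (
	"testing"

	"github.com/influxdb/telegraf/plugins/inputs"
	"github.com/influxdb/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

// fakeInput is a hypothetical input, defined only so the test below compiles.
type fakeInput struct{}

func (f *fakeInput) SampleConfig() string { return "" }
func (f *fakeInput) Description() string  { return "hypothetical input for the test sketch" }

// Gather emits a single measurement so the test has something to assert on.
func (f *fakeInput) Gather(acc inputs.Accumulator) error {
	acc.AddFields("example", map[string]interface{}{"value": float64(42)}, nil)
	return nil
}

func TestFakeInputGather(t *testing.T) {
	// testutil.Accumulator satisfies inputs.Accumulator, so Gather runs as-is.
	var acc testutil.Accumulator
	require.NoError(t, (&fakeInput{}).Gather(&acc))

	acc.AssertContainsFields(t, "example", map[string]interface{}{
		"value": float64(42),
	})
}
```

Note how the assertion is now against the fixed measurement name rather than a per-plugin `Name` prefix, matching the removal of the `Name` field above.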

View File

@ -3,7 +3,7 @@ package haproxy
import (
"encoding/csv"
"fmt"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"io"
"net/http"
"net/url"
@ -104,7 +104,7 @@ func (r *haproxy) Description() string {
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *haproxy) Gather(acc plugins.Accumulator) error {
func (g *haproxy) Gather(acc inputs.Accumulator) error {
if len(g.Servers) == 0 {
return g.gatherServer("http://127.0.0.1:1936", acc)
}
@ -126,7 +126,7 @@ func (g *haproxy) Gather(acc plugins.Accumulator) error {
return outerr
}
func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error {
if g.client == nil {
client := &http.Client{}
@ -156,7 +156,7 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
return importCsvResult(res.Body, acc, u.Host)
}
func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error {
csv := csv.NewReader(r)
result, err := csv.ReadAll()
now := time.Now()
@ -358,7 +358,7 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
}
func init() {
plugins.Add("haproxy", func() plugins.Plugin {
inputs.Add("haproxy", func() inputs.Input {
return &haproxy{}
})
}

View File

@ -11,7 +11,7 @@ import (
"sync"
"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type HttpJson struct {
@ -63,7 +63,7 @@ var sampleConfig = `
# ]
# HTTP parameters (all values must be strings)
[plugins.httpjson.parameters]
[inputs.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
`
@ -77,7 +77,7 @@ func (h *HttpJson) Description() string {
}
// Gathers data for all servers.
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
func (h *HttpJson) Gather(acc inputs.Accumulator) error {
var wg sync.WaitGroup
errorChannel := make(chan error, len(h.Servers))
@ -116,7 +116,7 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error {
// Returns:
// error: Any error that may have occurred
func (h *HttpJson) gatherServer(
acc plugins.Accumulator,
acc inputs.Accumulator,
serverURL string,
) error {
resp, err := h.sendRequest(serverURL)
@ -210,7 +210,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) {
}
func init() {
plugins.Add("httpjson", func() plugins.Plugin {
inputs.Add("httpjson", func() inputs.Input {
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
})
}

View File

@ -5,7 +5,7 @@ The influxdb plugin collects InfluxDB-formatted data from JSON endpoints.
With a configuration of:
```toml
[[plugins.influxdb]]
[[inputs.influxdb]]
urls = [
"http://127.0.0.1:8086/debug/vars",
"http://192.168.2.1:8086/debug/vars"

View File

@ -8,7 +8,7 @@ import (
"strings"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type InfluxDB struct {
@ -32,7 +32,7 @@ func (*InfluxDB) SampleConfig() string {
`
}
func (i *InfluxDB) Gather(acc plugins.Accumulator) error {
func (i *InfluxDB) Gather(acc inputs.Accumulator) error {
errorChannel := make(chan error, len(i.URLs))
var wg sync.WaitGroup
@ -77,7 +77,7 @@ type point struct {
// Returns:
// error: Any error that may have occurred
func (i *InfluxDB) gatherURL(
acc plugins.Accumulator,
acc inputs.Accumulator,
url string,
) error {
resp, err := http.Get(url)
@ -140,7 +140,7 @@ func (i *InfluxDB) gatherURL(
}
func init() {
plugins.Add("influxdb", func() plugins.Plugin {
inputs.Add("influxdb", func() inputs.Input {
return &InfluxDB{}
})
}

View File

@ -5,7 +5,7 @@ import (
"net/http/httptest"
"testing"
"github.com/influxdb/telegraf/plugins/influxdb"
"github.com/influxdb/telegraf/plugins/inputs/influxdb"
"github.com/influxdb/telegraf/testutil"
"github.com/stretchr/testify/require"
)

View File

@ -8,7 +8,7 @@ import (
"net/http"
"net/url"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Server struct {
@ -49,7 +49,7 @@ func (j *Jolokia) SampleConfig() string {
context = "/jolokia/read"
# List of servers exposing jolokia read service
[[plugins.jolokia.servers]]
[[inputs.jolokia.servers]]
name = "stable"
host = "192.168.103.2"
port = "8180"
@ -59,7 +59,7 @@ func (j *Jolokia) SampleConfig() string {
# List of metrics collected on above servers
# Each metric consists in a name, a jmx path and either a pass or drop slice attributes
# This collect all heap memory usage metrics
[[plugins.jolokia.metrics]]
[[inputs.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"
`
@ -108,7 +108,7 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
return jsonOut, nil
}
func (j *Jolokia) Gather(acc plugins.Accumulator) error {
func (j *Jolokia) Gather(acc inputs.Accumulator) error {
context := j.Context //"/jolokia/read"
servers := j.Servers
metrics := j.Metrics
@ -157,7 +157,7 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error {
}
func init() {
plugins.Add("jolokia", func() plugins.Plugin {
inputs.Add("jolokia", func() inputs.Input {
return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}}
})
}

View File

@ -6,7 +6,7 @@ import (
"sync"
"github.com/influxdb/influxdb/models"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"github.com/Shopify/sarama"
"github.com/wvanbergen/kafka/consumergroup"
@ -148,7 +148,7 @@ func (k *Kafka) Stop() {
}
}
func (k *Kafka) Gather(acc plugins.Accumulator) error {
func (k *Kafka) Gather(acc inputs.Accumulator) error {
k.Lock()
defer k.Unlock()
npoints := len(k.pointChan)
@ -160,7 +160,7 @@ func (k *Kafka) Gather(acc plugins.Accumulator) error {
}
func init() {
plugins.Add("kafka_consumer", func() plugins.Plugin {
inputs.Add("kafka_consumer", func() inputs.Input {
return &Kafka{}
})
}

View File

@ -3,7 +3,7 @@ package leofs
import (
"bufio"
"fmt"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"net/url"
"os/exec"
"strconv"
@ -146,7 +146,7 @@ func (l *LeoFS) Description() string {
return "Read metrics from a LeoFS Server via SNMP"
}
func (l *LeoFS) Gather(acc plugins.Accumulator) error {
func (l *LeoFS) Gather(acc inputs.Accumulator) error {
if len(l.Servers) == 0 {
l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc)
return nil
@ -176,7 +176,7 @@ func (l *LeoFS) Gather(acc plugins.Accumulator) error {
return outerr
}
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins.Accumulator) error {
func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc inputs.Accumulator) error {
cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid)
stdout, err := cmd.StdoutPipe()
if err != nil {
@ -225,7 +225,7 @@ func retrieveTokenAfterColon(line string) (string, error) {
}
func init() {
plugins.Add("leofs", func() plugins.Plugin {
inputs.Add("leofs", func() inputs.Input {
return &LeoFS{}
})
}

View File

@ -14,7 +14,7 @@ import (
"strings"
"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
// Lustre proc files can change between versions, so we want to future-proof
@ -129,7 +129,7 @@ var wanted_mds_fields = []*mapping{
},
}
func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc plugins.Accumulator) error {
func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc inputs.Accumulator) error {
files, err := filepath.Glob(fileglob)
if err != nil {
return err
@ -193,7 +193,7 @@ func (l *Lustre2) Description() string {
}
// Gather reads stats from all lustre targets
func (l *Lustre2) Gather(acc plugins.Accumulator) error {
func (l *Lustre2) Gather(acc inputs.Accumulator) error {
l.allFields = make(map[string]map[string]interface{})
if len(l.Ost_procfiles) == 0 {
@ -244,7 +244,7 @@ func (l *Lustre2) Gather(acc plugins.Accumulator) error {
}
func init() {
plugins.Add("lustre2", func() plugins.Plugin {
inputs.Add("lustre2", func() inputs.Input {
return &Lustre2{}
})
}

View File

@ -4,7 +4,7 @@ import (
"fmt"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type MailChimp struct {
@ -34,7 +34,7 @@ func (m *MailChimp) Description() string {
return "Gathers metrics from the /3.0/reports MailChimp API"
}
func (m *MailChimp) Gather(acc plugins.Accumulator) error {
func (m *MailChimp) Gather(acc inputs.Accumulator) error {
if m.api == nil {
m.api = NewChimpAPI(m.ApiKey)
}
@ -71,7 +71,7 @@ func (m *MailChimp) Gather(acc plugins.Accumulator) error {
return nil
}
func gatherReport(acc plugins.Accumulator, report Report, now time.Time) {
func gatherReport(acc inputs.Accumulator, report Report, now time.Time) {
tags := make(map[string]string)
tags["id"] = report.ID
tags["campaign_title"] = report.CampaignTitle
@ -110,7 +110,7 @@ func gatherReport(acc plugins.Accumulator, report Report, now time.Time) {
}
func init() {
plugins.Add("mailchimp", func() plugins.Plugin {
inputs.Add("mailchimp", func() inputs.Input {
return &MailChimp{}
})
}

View File

@ -8,7 +8,7 @@ import (
"strconv"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
// Memcached is a memcached plugin
@ -69,7 +69,7 @@ func (m *Memcached) Description() string {
}
// Gather reads stats from all configured servers accumulates stats
func (m *Memcached) Gather(acc plugins.Accumulator) error {
func (m *Memcached) Gather(acc inputs.Accumulator) error {
if len(m.Servers) == 0 && len(m.UnixSockets) == 0 {
return m.gatherServer(":11211", false, acc)
}
@ -92,7 +92,7 @@ func (m *Memcached) Gather(acc plugins.Accumulator) error {
func (m *Memcached) gatherServer(
address string,
unix bool,
acc plugins.Accumulator,
acc inputs.Accumulator,
) error {
var conn net.Conn
if unix {
@ -178,7 +178,7 @@ func parseResponse(r *bufio.Reader) (map[string]string, error) {
}
func init() {
plugins.Add("memcached", func() plugins.Plugin {
inputs.Add("memcached", func() inputs.Input {
return &Memcached{}
})
}

View File

@ -1,4 +1,4 @@
package plugins
package inputs
import "github.com/stretchr/testify/mock"

View File

@ -9,7 +9,7 @@ import (
"sync"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
)
@ -45,7 +45,7 @@ var localhost = &url.URL{Host: "127.0.0.1:27017"}
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (m *MongoDB) Gather(acc plugins.Accumulator) error {
func (m *MongoDB) Gather(acc inputs.Accumulator) error {
if len(m.Servers) == 0 {
m.gatherServer(m.getMongoServer(localhost), acc)
return nil
@ -88,7 +88,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server {
return m.mongos[url.Host]
}
func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error {
func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error {
if server.Session == nil {
var dialAddrs []string
if server.Url.User != nil {
@ -138,7 +138,7 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error {
}
func init() {
plugins.Add("mongodb", func() plugins.Plugin {
inputs.Add("mongodb", func() inputs.Input {
return &MongoDB{
mongos: make(map[string]*Server),
}

View File

@ -5,7 +5,7 @@ import (
"reflect"
"strconv"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type MongodbData struct {
@ -97,7 +97,7 @@ func (d *MongodbData) add(key string, val interface{}) {
d.Fields[key] = val
}
func (d *MongodbData) flush(acc plugins.Accumulator) {
func (d *MongodbData) flush(acc inputs.Accumulator) {
acc.AddFields(
"mongodb",
d.Fields,

View File

@ -4,7 +4,7 @@ import (
"net/url"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
@ -21,7 +21,7 @@ func (s *Server) getDefaultTags() map[string]string {
return tags
}
func (s *Server) gatherData(acc plugins.Accumulator) error {
func (s *Server) gatherData(acc inputs.Accumulator) error {
s.Session.SetMode(mgo.Eventual, true)
s.Session.SetSocketTimeout(0)
result := &ServerStatus{}

View File

@ -6,7 +6,7 @@ import (
"strings"
_ "github.com/go-sql-driver/mysql"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Mysql struct {
@ -35,7 +35,7 @@ func (m *Mysql) Description() string {
var localhost = ""
func (m *Mysql) Gather(acc plugins.Accumulator) error {
func (m *Mysql) Gather(acc inputs.Accumulator) error {
if len(m.Servers) == 0 {
// if we can't get stats in this case, thats fine, don't report
// an error.
@ -113,7 +113,7 @@ var mappings = []*mapping{
},
}
func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error {
// If user forgot the '/', add it
if strings.HasSuffix(serv, ")") {
serv = serv + "/"
@ -207,7 +207,7 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
}
func init() {
plugins.Add("mysql", func() plugins.Plugin {
inputs.Add("mysql", func() inputs.Input {
return &Mysql{}
})
}

View File

@ -11,7 +11,7 @@ import (
"sync"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Nginx struct {
@ -31,7 +31,7 @@ func (n *Nginx) Description() string {
return "Read Nginx's basic status information (ngx_http_stub_status_module)"
}
func (n *Nginx) Gather(acc plugins.Accumulator) error {
func (n *Nginx) Gather(acc inputs.Accumulator) error {
var wg sync.WaitGroup
var outerr error
@ -59,7 +59,7 @@ var tr = &http.Transport{
var client = &http.Client{Transport: tr}
func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
func (n *Nginx) gatherUrl(addr *url.URL, acc inputs.Accumulator) error {
resp, err := client.Get(addr.String())
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@ -159,7 +159,7 @@ func getTags(addr *url.URL) map[string]string {
}
func init() {
plugins.Add("nginx", func() plugins.Plugin {
inputs.Add("nginx", func() inputs.Input {
return &Nginx{}
})
}

View File

@ -43,7 +43,7 @@ Using this configuration:
When run with:
```
./telegraf -config telegraf.conf -filter phpfpm -test
./telegraf -config telegraf.conf -input-filter phpfpm -test
```
It produces:

View File

@ -11,7 +11,7 @@ import (
"strings"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
const (
@ -67,7 +67,7 @@ func (r *phpfpm) Description() string {
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *phpfpm) Gather(acc plugins.Accumulator) error {
func (g *phpfpm) Gather(acc inputs.Accumulator) error {
if len(g.Urls) == 0 {
return g.gatherServer("http://127.0.0.1/status", acc)
}
@ -90,7 +90,7 @@ func (g *phpfpm) Gather(acc plugins.Accumulator) error {
}
// Request status page to get stat raw data
func (g *phpfpm) gatherServer(addr string, acc plugins.Accumulator) error {
func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error {
if g.client == nil {
client := &http.Client{}
@ -153,7 +153,7 @@ func (g *phpfpm) gatherServer(addr string, acc plugins.Accumulator) error {
}
// Import HTTP stat data into Telegraf system
func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat, error) {
func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) {
stats := make(poolStat)
var currentPool string
@ -209,7 +209,7 @@ func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat,
}
func init() {
plugins.Add("phpfpm", func() plugins.Plugin {
inputs.Add("phpfpm", func() inputs.Input {
return &phpfpm{}
})
}

View File

@ -7,7 +7,7 @@ import (
"strings"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
// HostPinger is a function that runs the "ping" function using a list of
@ -56,7 +56,7 @@ func (_ *Ping) SampleConfig() string {
return sampleConfig
}
func (p *Ping) Gather(acc plugins.Accumulator) error {
func (p *Ping) Gather(acc inputs.Accumulator) error {
var wg sync.WaitGroup
errorChannel := make(chan error, len(p.Urls)*2)
@ -64,7 +64,7 @@ func (p *Ping) Gather(acc plugins.Accumulator) error {
// Spin off a go routine for each url to ping
for _, url := range p.Urls {
wg.Add(1)
go func(url string, acc plugins.Accumulator) {
go func(url string, acc inputs.Accumulator) {
defer wg.Done()
args := p.args(url)
out, err := p.pingHost(args...)
@ -174,7 +174,7 @@ func processPingOutput(out string) (int, int, float64, error) {
}
func init() {
plugins.Add("ping", func() plugins.Plugin {
inputs.Add("ping", func() inputs.Input {
return &Ping{pingHost: hostPinger}
})
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"strings"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
_ "github.com/lib/pq"
)
@ -53,7 +53,7 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
var localhost = "host=localhost sslmode=disable"
func (p *Postgresql) Gather(acc plugins.Accumulator) error {
func (p *Postgresql) Gather(acc inputs.Accumulator) error {
var query string
if p.Address == "" || p.Address == "localhost" {
@ -101,7 +101,7 @@ type scanner interface {
Scan(dest ...interface{}) error
}
func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error {
func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error {
var columnVars []interface{}
var dbname bytes.Buffer
@ -145,7 +145,7 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error {
}
func init() {
plugins.Add("postgresql", func() plugins.Plugin {
inputs.Add("postgresql", func() inputs.Input {
return &Postgresql{}
})
}

View File

@ -10,7 +10,7 @@ import (
"github.com/shirou/gopsutil/process"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Procstat struct {
@ -45,7 +45,7 @@ func (_ *Procstat) Description() string {
return "Monitor process cpu and memory usage"
}
func (p *Procstat) Gather(acc plugins.Accumulator) error {
func (p *Procstat) Gather(acc inputs.Accumulator) error {
procs, err := p.createProcesses()
if err != nil {
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
@ -161,7 +161,7 @@ func pidsFromPattern(pattern string) ([]int32, error) {
}
func init() {
plugins.Add("procstat", func() plugins.Plugin {
inputs.Add("procstat", func() inputs.Input {
return NewProcstat()
})
}

View File

@ -6,14 +6,14 @@ import (
"github.com/shirou/gopsutil/process"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type SpecProcessor struct {
Prefix string
tags map[string]string
fields map[string]interface{}
acc plugins.Accumulator
acc inputs.Accumulator
proc *process.Process
}
@ -34,7 +34,7 @@ func (p *SpecProcessor) flush() {
func NewSpecProcessor(
prefix string,
acc plugins.Accumulator,
acc inputs.Accumulator,
p *process.Process,
) *SpecProcessor {
tags := make(map[string]string)

View File

@ -3,7 +3,7 @@ package prometheus
import (
"errors"
"fmt"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"io"
@ -32,7 +32,7 @@ var ErrProtocolError = errors.New("prometheus protocol error")
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (g *Prometheus) Gather(acc plugins.Accumulator) error {
func (g *Prometheus) Gather(acc inputs.Accumulator) error {
var wg sync.WaitGroup
var outerr error
@ -50,7 +50,7 @@ func (g *Prometheus) Gather(acc plugins.Accumulator) error {
return outerr
}
func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error {
func (g *Prometheus) gatherURL(url string, acc inputs.Accumulator) error {
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", url, err)
@ -97,7 +97,7 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error {
}
func init() {
plugins.Add("prometheus", func() plugins.Plugin {
inputs.Add("prometheus", func() inputs.Input {
return &Prometheus{}
})
}

View File

@ -8,7 +8,7 @@ import (
"reflect"
"strings"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
// PuppetAgent is a PuppetAgent plugin
@ -82,7 +82,7 @@ func (pa *PuppetAgent) Description() string {
}
// Gather reads stats from all configured servers accumulates stats
func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error {
func (pa *PuppetAgent) Gather(acc inputs.Accumulator) error {
if len(pa.Location) == 0 {
pa.Location = "/var/lib/puppet/state/last_run_summary.yaml"
@ -110,7 +110,7 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error {
return nil
}
func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) {
func structPrinter(s *State, acc inputs.Accumulator, tags map[string]string) {
e := reflect.ValueOf(s).Elem()
fields := make(map[string]interface{})
@ -131,7 +131,7 @@ func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) {
}
func init() {
plugins.Add("puppetagent", func() plugins.Plugin {
inputs.Add("puppetagent", func() inputs.Input {
return &PuppetAgent{}
})
}

View File

@ -7,7 +7,7 @@ import (
"strconv"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
const DefaultUsername = "guest"
@ -91,7 +91,7 @@ type Node struct {
SocketsUsed int64 `json:"sockets_used"`
}
type gatherFunc func(r *RabbitMQ, acc plugins.Accumulator, errChan chan error)
type gatherFunc func(r *RabbitMQ, acc inputs.Accumulator, errChan chan error)
var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues}
@ -114,7 +114,7 @@ func (r *RabbitMQ) Description() string {
return "Read metrics from one or many RabbitMQ servers via the management API"
}
func (r *RabbitMQ) Gather(acc plugins.Accumulator) error {
func (r *RabbitMQ) Gather(acc inputs.Accumulator) error {
if r.Client == nil {
r.Client = &http.Client{}
}
@ -167,7 +167,7 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
return nil
}
func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
func gatherOverview(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
overview := &OverviewResponse{}
err := r.requestJSON("/api/overview", &overview)
@ -203,7 +203,7 @@ func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
errChan <- nil
}
func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
func gatherNodes(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
nodes := make([]Node, 0)
// Gather information about nodes
err := r.requestJSON("/api/nodes", &nodes)
@ -240,7 +240,7 @@ func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
errChan <- nil
}
func gatherQueues(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
func gatherQueues(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) {
// Gather information about queues
queues := make([]Queue, 0)
err := r.requestJSON("/api/queues", &queues)
@ -320,7 +320,7 @@ func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool {
}
func init() {
plugins.Add("rabbitmq", func() plugins.Plugin {
inputs.Add("rabbitmq", func() inputs.Input {
return &RabbitMQ{}
})
}

View File

@ -10,7 +10,7 @@ import (
"strings"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
)
type Redis struct {
@ -76,7 +76,7 @@ var ErrProtocolError = errors.New("redis protocol error")
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (r *Redis) Gather(acc plugins.Accumulator) error {
func (r *Redis) Gather(acc inputs.Accumulator) error {
if len(r.Servers) == 0 {
url := &url.URL{
Host: ":6379",
@ -113,7 +113,7 @@ func (r *Redis) Gather(acc plugins.Accumulator) error {
const defaultPort = "6379"
func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
func (r *Redis) gatherServer(addr *url.URL, acc inputs.Accumulator) error {
_, _, err := net.SplitHostPort(addr.Host)
if err != nil {
addr.Host = addr.Host + ":" + defaultPort
@ -158,7 +158,7 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
// gatherInfoOutput gathers
func gatherInfoOutput(
rdr *bufio.Reader,
acc plugins.Accumulator,
acc inputs.Accumulator,
tags map[string]string,
) error {
var keyspace_hits, keyspace_misses uint64 = 0, 0
@ -227,7 +227,7 @@ func gatherInfoOutput(
func gatherKeyspaceLine(
name string,
line string,
acc plugins.Accumulator,
acc inputs.Accumulator,
tags map[string]string,
) {
if strings.Contains(line, "keys=") {
@ -246,7 +246,7 @@ func gatherKeyspaceLine(
}
func init() {
plugins.Add("redis", func() plugins.Plugin {
inputs.Add("redis", func() inputs.Input {
return &Redis{}
})
}

View File

@ -1,4 +1,4 @@
package plugins
package inputs
import "time"
@ -17,40 +17,40 @@ type Accumulator interface {
t ...time.Time)
}
type Plugin interface {
// SampleConfig returns the default configuration of the Plugin
type Input interface {
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Description returns a one-sentence description on the Plugin
// Description returns a one-sentence description on the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Plugin
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
Gather(Accumulator) error
}
type ServicePlugin interface {
// SampleConfig returns the default configuration of the Plugin
type ServiceInput interface {
// SampleConfig returns the default configuration of the Input
SampleConfig() string
// Description returns a one-sentence description on the Plugin
// Description returns a one-sentence description on the Input
Description() string
// Gather takes in an accumulator and adds the metrics that the Plugin
// Gather takes in an accumulator and adds the metrics that the Input
// gathers. This is called every "interval"
Gather(Accumulator) error
// Start starts the ServicePlugin's service, whatever that may be
// Start starts the ServiceInput's service, whatever that may be
Start() error
// Stop stops the services and closes any necessary channels and connections
Stop()
}
type Creator func() Plugin
type Creator func() Input
var Plugins = map[string]Creator{}
var Inputs = map[string]Creator{}
func Add(name string, creator Creator) {
Plugins[name] = creator
Inputs[name] = creator
}
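To tie the registry change together, a hypothetical input written against the renamed package. The `Example` type, its measurement, and its fields are invented for this sketch; the `inputs.Input` interface, `inputs.Accumulator`, and `inputs.Add` registration are the pieces defined in the hunk above:

```go
package example

import (
	"github.com/influxdb/telegraf/plugins/inputs"
)

// Example is a hypothetical input used only to illustrate the renamed API.
type Example struct{}

// SampleConfig returns the default configuration of the input.
func (e *Example) SampleConfig() string {
	return "# no configuration needed\n"
}

// Description returns a one-sentence description of the input.
func (e *Example) Description() string {
	return "A hypothetical example input"
}

// Gather is called every collection interval and adds fields via the Accumulator.
func (e *Example) Gather(acc inputs.Accumulator) error {
	fields := map[string]interface{}{"value": float64(42)}
	tags := map[string]string{"source": "example"}
	acc.AddFields("example", fields, tags)
	return nil
}

func init() {
	// The name registered here is what appears in [[inputs.example]] config sections.
	inputs.Add("example", func() inputs.Input {
		return &Example{}
	})
}
```

The converted plugins in this commit (apache, bcache, disque, and the rest) follow exactly this shape: swap the import path, change `plugins.Accumulator` to `inputs.Accumulator`, and register through `inputs.Add`.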

View File

@ -5,7 +5,7 @@ import (
"net/url"
"sync"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/inputs"
"gopkg.in/dancannon/gorethink.v1"
)
@ -35,7 +35,7 @@ var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}}
// Reads stats from all configured servers accumulates stats.
// Returns one of the errors encountered while gather stats (if any).
func (r *RethinkDB) Gather(acc plugins.Accumulator) error {
func (r *RethinkDB) Gather(acc inputs.Accumulator) error {
if len(r.Servers) == 0 {
r.gatherServer(localhost, acc)
return nil
@ -65,7 +65,7 @@ func (r *RethinkDB) Gather(acc plugins.Accumulator) error {
return outerr
}
func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error {
func (r *RethinkDB) gatherServer(server *Server, acc inputs.Accumulator) error {
var err error
connectOpts := gorethink.ConnectOpts{
Address: server.Url.Host,
@ -87,7 +87,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error
}
func init() {
plugins.Add("rethinkdb", func() plugins.Plugin {
inputs.Add("rethinkdb", func() inputs.Input {
return &RethinkDB{}
})
}

Some files were not shown because too many files have changed in this diff.