From 50fc3ec974664b22d3c0ff79233133b10059dceb Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 11 Dec 2015 13:07:32 -0700 Subject: [PATCH 001/103] Updating system plugins for 0.3.0 --- CHANGELOG.md | 13 + CONFIGURATION.md | 177 ++++++++ README.md | 133 +----- accumulator.go | 74 +++- agent.go | 6 +- etc/telegraf.conf | 4 +- internal/config/config.go | 59 ++- internal/config/testdata/telegraf-agent.toml | 2 +- plugins/system/cpu.go | 53 +-- plugins/system/cpu_test.go | 106 +++++ plugins/system/disk.go | 34 +- plugins/system/disk_test.go | 161 +++++++ plugins/system/docker.go | 77 ++-- plugins/system/memory.go | 40 +- plugins/system/memory_test.go | 73 ++++ plugins/system/net.go | 19 +- plugins/system/net_test.go | 88 ++++ plugins/system/netstat.go | 30 +- plugins/system/ps.go | 52 +++ plugins/system/system.go | 13 +- plugins/system/system_test.go | 426 ------------------- 21 files changed, 928 insertions(+), 712 deletions(-) create mode 100644 CONFIGURATION.md create mode 100644 plugins/system/cpu_test.go create mode 100644 plugins/system/disk_test.go create mode 100644 plugins/system/memory_test.go create mode 100644 plugins/system/net_test.go delete mode 100644 plugins/system/system_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f562c3ea..d3ba9574c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## v0.3.0 [unreleased] + +### Release Notes +- **breaking change** the `io` plugin has been renamed `diskio` +- **breaking change** Plugin measurements aggregated into a single measurement. + +### Features +- Plugin measurements aggregated into a single measurement. 
+- Added ability to specify per-plugin tags +- Added ability to specify per-plugin measurement suffix and prefix + +### Bugfixes + ## v0.2.5 [unreleased] ### Features diff --git a/CONFIGURATION.md b/CONFIGURATION.md new file mode 100644 index 000000000..c1040df2d --- /dev/null +++ b/CONFIGURATION.md @@ -0,0 +1,177 @@ +# Telegraf Configuration + +## Plugin Configuration + +There are some configuration options that are configurable per plugin: + +* **name_override**: Override the base name of the measurement. +(Default is the name of the plugin). +* **name_prefix**: Specifies a prefix to attach to the measurement name. +* **name_suffix**: Specifies a suffix to attach to the measurement name. +* **tags**: A map of tags to apply to a specific plugin's measurements. + +### Plugin Filters + +There are also filters that can be configured per plugin: + +* **pass**: An array of strings that is used to filter metrics generated by the +current plugin. Each string in the array is tested as a glob match against field names +and if it matches, the field is emitted. +* **drop**: The inverse of pass, if a field name matches, it is not emitted. +* **tagpass**: tag names and arrays of strings that are used to filter +measurements by the current plugin. Each string in the array is tested as a glob +match against the tag name, and if it matches the measurement is emitted. +* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted. +This is tested on measurements that have passed the tagpass test. +* **interval**: How often to gather this metric. Normal plugins use a single +global interval, but if one particular plugin should be run less or more often, +you can configure that here. + +### Plugin Configuration Examples + +This is a full working config that will output CPU data to an InfluxDB instance +at 192.168.59.103:8086, tagging measurements with dc="denver-1". 
It will output +measurements at a 10s interval and will collect per-cpu data, dropping any +fields which begin with `time_`. + +```toml +[tags] + dc = "denver-1" + +[agent] + interval = "10s" + +# OUTPUTS +[outputs] +[[outputs.influxdb]] + url = "http://192.168.59.103:8086" # required. + database = "telegraf" # required. + precision = "s" + +# PLUGINS +[plugins] +[[plugins.cpu]] + percpu = true + totalcpu = false + # filter all fields beginning with 'time_' + drop = ["time_*"] +``` + +### Plugin Config: tagpass and tagdrop + +```toml +[plugins] +[[plugins.cpu]] + percpu = true + totalcpu = false + drop = ["cpu_time"] + # Don't collect CPU data for cpu6 & cpu7 + [plugins.cpu.tagdrop] + cpu = [ "cpu6", "cpu7" ] + +[[plugins.disk]] + [plugins.disk.tagpass] + # tagpass conditions are OR, not AND. + # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) + # then the metric passes + fstype = [ "ext4", "xfs" ] + # Globs can also be used on the tag values + path = [ "/opt", "/home*" ] +``` + +### Plugin Config: pass and drop + +```toml +# Drop all metrics for guest & steal CPU usage +[[plugins.cpu]] + percpu = false + totalcpu = true + drop = ["usage_guest", "usage_steal"] + +# Only store inode related metrics for disks +[[plugins.disk]] + pass = ["inodes*"] +``` + +### Plugin config: prefix, suffix, and override + +This plugin will emit measurements with the name `cpu_total` + +```toml +[[plugins.cpu]] + name_suffix = "_total" + percpu = false + totalcpu = true +``` + +This will emit measurements with the name `foobar` + +```toml +[[plugins.cpu]] + name_override = "foobar" + percpu = false + totalcpu = true +``` + +### Plugin config: tags + +This plugin will emit measurements with two additional tags: `tag1=foo` and +`tag2=bar` + +```toml +[[plugins.cpu]] + percpu = false + totalcpu = true + [plugins.cpu.tags] + tag1 = "foo" + tag2 = "bar" +``` + +### Multiple plugins of the same type + +Additional plugins (or outputs) of the same type can be specified, +just 
define more instances in the config file: + +```toml +[[plugins.cpu]] + percpu = false + totalcpu = true + +[[plugins.cpu]] + percpu = true + totalcpu = false + drop = ["cpu_time*"] +``` + +## Output Configuration + +Telegraf also supports specifying multiple output sinks to send data to, +configuring each output sink is different, but examples can be +found by running `telegraf -sample-config`. + +Outputs also support the same configurable options as plugins +(pass, drop, tagpass, tagdrop), added in 0.2.4 + +```toml +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf" + precision = "s" + # Drop all measurements that start with "aerospike" + drop = ["aerospike*"] + +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf-aerospike-data" + precision = "s" + # Only accept aerospike data: + pass = ["aerospike*"] + +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf-cpu0-data" + precision = "s" + # Only store measurements where the tag "cpu" matches the value "cpu0" + [outputs.influxdb.tagpass] + cpu = ["cpu0"] +``` diff --git a/README.md b/README.md index 4130d55bb..aefcece44 100644 --- a/README.md +++ b/README.md @@ -116,99 +116,10 @@ unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes. * **debug**: Set to true to gather and send metrics to STDOUT as well as InfluxDB. -## Plugin Options +## Configuration -There are 5 configuration options that are configurable per plugin: - -* **pass**: An array of strings that is used to filter metrics generated by the -current plugin. Each string in the array is tested as a glob match against metric names -and if it matches, the metric is emitted. -* **drop**: The inverse of pass, if a metric name matches, it is not emitted. -* **tagpass**: tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as a glob match against -the tag name, and if it matches the metric is emitted. 
-* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted. -This is tested on metrics that have passed the tagpass test. -* **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular plugin should be run less or more often, -you can configure that here. - -### Plugin Configuration Examples - -This is a full working config that will output CPU data to an InfluxDB instance -at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output -measurements at a 10s interval and will collect per-cpu data, dropping any -measurements which begin with `cpu_time`. - -```toml -[tags] - dc = "denver-1" - -[agent] - interval = "10s" - -# OUTPUTS -[outputs] -[[outputs.influxdb]] - url = "http://192.168.59.103:8086" # required. - database = "telegraf" # required. - precision = "s" - -# PLUGINS -[plugins] -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time*"] -``` - -Below is how to configure `tagpass` and `tagdrop` parameters - -```toml -[plugins] -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time"] - # Don't collect CPU data for cpu6 & cpu7 - [plugins.cpu.tagdrop] - cpu = [ "cpu6", "cpu7" ] - -[[plugins.disk]] - [plugins.disk.tagpass] - # tagpass conditions are OR, not AND. 
- # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) - # then the metric passes - fstype = [ "ext4", "xfs" ] - # Globs can also be used on the tag values - path = [ "/opt", "/home*" ] -``` - -Below is how to configure `pass` and `drop` parameters - -```toml -# Drop all metrics for guest CPU usage -[[plugins.cpu]] - drop = [ "cpu_usage_guest" ] - -# Only store inode related metrics for disks -[[plugins.disk]] - pass = [ "disk_inodes*" ] -``` - - -Additional plugins (or outputs) of the same type can be specified, -just define more instances in the config file: - -```toml -[[plugins.cpu]] - percpu = false - totalcpu = true - -[[plugins.cpu]] - percpu = true - totalcpu = false - drop = ["cpu_time*"] -``` +See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced +configuration options. ## Supported Plugins @@ -226,7 +137,7 @@ Telegraf currently has support for collecting metrics from: * haproxy * httpjson (generic JSON-emitting http service plugin) * influxdb -* jolokia (remote JMX with JSON over HTTP) +* jolokia * leofs * lustre2 * mailchimp @@ -249,10 +160,10 @@ Telegraf currently has support for collecting metrics from: * system * cpu * mem - * io * net * netstat * disk + * diskio * swap ## Supported Service Plugins @@ -265,40 +176,6 @@ Telegraf can collect metrics via the following services: We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. -## Output options - -Telegraf also supports specifying multiple output sinks to send data to, -configuring each output sink is different, but examples can be -found by running `telegraf -sample-config`. 
- -Outputs also support the same configurable options as plugins -(pass, drop, tagpass, tagdrop), added in 0.2.4 - -```toml -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf" - precision = "s" - # Drop all measurements that start with "aerospike" - drop = ["aerospike*"] - -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf-aerospike-data" - precision = "s" - # Only accept aerospike data: - pass = ["aerospike*"] - -[[outputs.influxdb]] - urls = [ "http://localhost:8086" ] - database = "telegraf-cpu0-data" - precision = "s" - # Only store measurements where the tag "cpu" matches the value "cpu0" - [outputs.influxdb.tagpass] - cpu = ["cpu0"] -``` - - ## Supported Outputs * influxdb diff --git a/accumulator.go b/accumulator.go index 8dbf2e8aa..2defc8c7b 100644 --- a/accumulator.go +++ b/accumulator.go @@ -69,30 +69,72 @@ func (ac *accumulator) AddFields( tags map[string]string, t ...time.Time, ) { - // Validate uint64 and float64 fields + if !ac.pluginConfig.Filter.ShouldTagsPass(tags) { + return + } + + // Override measurement name if set + if len(ac.pluginConfig.NameOverride) != 0 { + measurement = ac.pluginConfig.NameOverride + } + // Apply measurement prefix and suffix if set + if len(ac.pluginConfig.MeasurementPrefix) != 0 { + measurement = ac.pluginConfig.MeasurementPrefix + measurement + } + if len(ac.pluginConfig.MeasurementSuffix) != 0 { + measurement = measurement + ac.pluginConfig.MeasurementSuffix + } + + if tags == nil { + tags = make(map[string]string) + } + // Apply plugin-wide tags if set + for k, v := range ac.pluginConfig.Tags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + // Apply daemon-wide tags if set + for k, v := range ac.defaultTags { + if _, ok := tags[k]; !ok { + tags[k] = v + } + } + + result := make(map[string]interface{}) for k, v := range fields { + // Filter out any filtered fields + if ac.pluginConfig != nil { + if !ac.pluginConfig.Filter.ShouldPass(k) { + continue + 
} + } + result[k] = v + + // Validate uint64 and float64 fields switch val := v.(type) { case uint64: // InfluxDB does not support writing uint64 if val < uint64(9223372036854775808) { - fields[k] = int64(val) + result[k] = int64(val) } else { - fields[k] = int64(9223372036854775807) + result[k] = int64(9223372036854775807) } case float64: // NaNs are invalid values in influxdb, skip measurement if math.IsNaN(val) || math.IsInf(val, 0) { if ac.debug { - log.Printf("Measurement [%s] has a NaN or Inf field, skipping", - measurement) + log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+ + "field, skipping", + measurement, k) } - return + continue } } } - - if tags == nil { - tags = make(map[string]string) + fields = nil + if len(result) == 0 { + return } var timestamp time.Time @@ -106,19 +148,7 @@ func (ac *accumulator) AddFields( measurement = ac.prefix + measurement } - if ac.pluginConfig != nil { - if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) { - return - } - } - - for k, v := range ac.defaultTags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - - pt, err := client.NewPoint(measurement, tags, fields, timestamp) + pt, err := client.NewPoint(measurement, tags, result, timestamp) if err != nil { log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) return diff --git a/agent.go b/agent.go index 68b1b5f16..1658027a7 100644 --- a/agent.go +++ b/agent.go @@ -104,7 +104,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { acc := NewAccumulator(plugin.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(plugin.Name + "_") acc.SetDefaultTags(a.Config.Tags) if err := plugin.Plugin.Gather(acc); err != nil { @@ -141,7 +141,7 @@ func (a *Agent) gatherSeparate( acc := NewAccumulator(plugin.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(plugin.Name + "_") 
acc.SetDefaultTags(a.Config.Tags) if err := plugin.Plugin.Gather(acc); err != nil { @@ -187,7 +187,7 @@ func (a *Agent) Test() error { for _, plugin := range a.Config.Plugins { acc := NewAccumulator(plugin.Config, pointChan) acc.SetDebug(true) - acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(plugin.Name + "_") fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name) if plugin.Config.Interval != 0 { diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 0781d3028..9460cef25 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -97,8 +97,8 @@ # Mountpoints=["/"] # Read metrics about disk IO by device -[[plugins.io]] - # By default, telegraf will gather stats for all devices including +[[plugins.diskio]] + # By default, telegraf will gather stats for all devices including # disk partitions. # Setting devices will restrict the stats to the specified devices. # Devices=["sda","sdb"] diff --git a/internal/config/config.go b/internal/config/config.go index 348496f0a..0270a3913 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -112,9 +112,13 @@ type Filter struct { // PluginConfig containing a name, interval, and filter type PluginConfig struct { - Name string - Filter Filter - Interval time.Duration + Name string + NameOverride string + MeasurementPrefix string + MeasurementSuffix string + Tags map[string]string + Filter Filter + Interval time.Duration } // OutputConfig containing name and filter @@ -142,12 +146,12 @@ func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point { // ShouldPass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters -func (f Filter) ShouldPass(measurement string) bool { +func (f Filter) ShouldPass(fieldkey string) bool { if f.Pass != nil { for _, pat := range f.Pass { // TODO remove HasPrefix check, leaving it for now for legacy support. 
// Cam, 2015-12-07 - if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) { + if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) { return true } } @@ -158,7 +162,7 @@ func (f Filter) ShouldPass(measurement string) bool { for _, pat := range f.Drop { // TODO remove HasPrefix check, leaving it for now for legacy support. // Cam, 2015-12-07 - if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) { + if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) { return false } } @@ -628,7 +632,8 @@ func buildFilter(tbl *ast.Table) Filter { return f } -// buildPlugin parses plugin specific items from the ast.Table, builds the filter and returns a +// buildPlugin parses plugin specific items from the ast.Table, +// builds the filter and returns a // PluginConfig to be inserted into RunningPlugin func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) { cp := &PluginConfig{Name: name} @@ -644,10 +649,47 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) { } } } + + if node, ok := tbl.Fields["name_prefix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + cp.MeasurementPrefix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_suffix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + cp.MeasurementSuffix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + cp.NameOverride = str.Value + } + } + } + + cp.Tags = make(map[string]string) + if node, ok := tbl.Fields["tags"]; ok { + if subtbl, ok := node.(*ast.Table); ok { + if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil { + log.Printf("Could not parse tags for plugin %s\n", name) + } + } + } + + delete(tbl.Fields, "name_prefix") + delete(tbl.Fields, "name_suffix") + delete(tbl.Fields, "name_override") 
delete(tbl.Fields, "interval") + delete(tbl.Fields, "tags") cp.Filter = buildFilter(tbl) return cp, nil - } // buildOutput parses output specific items from the ast.Table, builds the filter and returns an @@ -659,5 +701,4 @@ func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) { Filter: buildFilter(tbl), } return oc, nil - } diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index e63e47b56..42ce89cd8 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -105,7 +105,7 @@ urls = ["http://localhost/server-status?auto"] drop = ["cpu_time"] # Read metrics about disk usage by mount point -[[plugins.disk]] +[[plugins.diskio]] # no configuration # Read metrics from one or many disque servers diff --git a/plugins/system/cpu.go b/plugins/system/cpu.go index 837a1bc23..24350fc6c 100644 --- a/plugins/system/cpu.go +++ b/plugins/system/cpu.go @@ -2,6 +2,7 @@ package system import ( "fmt" + "time" "github.com/influxdb/telegraf/plugins" "github.com/shirou/gopsutil/cpu" @@ -31,7 +32,7 @@ var sampleConfig = ` # Whether to report total system cpu stats or not totalcpu = true # Comment this line if you want the raw CPU time metrics - drop = ["cpu_time*"] + drop = ["time_*"] ` func (_ *CPUStats) SampleConfig() string { @@ -43,6 +44,7 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error { if err != nil { return fmt.Errorf("error getting CPU info: %s", err) } + now := time.Now() for i, cts := range times { tags := map[string]string{ @@ -51,21 +53,24 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error { total := totalCpuTime(cts) - // Add total cpu numbers - add(acc, "time_user", cts.User, tags) - add(acc, "time_system", cts.System, tags) - add(acc, "time_idle", cts.Idle, tags) - add(acc, "time_nice", cts.Nice, tags) - add(acc, "time_iowait", cts.Iowait, tags) - add(acc, "time_irq", cts.Irq, tags) - add(acc, "time_softirq", cts.Softirq, tags) - 
add(acc, "time_steal", cts.Steal, tags) - add(acc, "time_guest", cts.Guest, tags) - add(acc, "time_guest_nice", cts.GuestNice, tags) + // Add cpu time metrics + fields := map[string]interface{}{ + "time_user": cts.User, + "time_system": cts.System, + "time_idle": cts.Idle, + "time_nice": cts.Nice, + "time_iowait": cts.Iowait, + "time_irq": cts.Irq, + "time_softirq": cts.Softirq, + "time_steal": cts.Steal, + "time_guest": cts.Guest, + "time_guest_nice": cts.GuestNice, + } // Add in percentage if len(s.lastStats) == 0 { - // If it's the 1st gather, can't get CPU stats yet + acc.AddFields("cpu", fields, tags, now) + // If it's the 1st gather, can't get CPU Usage stats yet continue } lastCts := s.lastStats[i] @@ -81,17 +86,17 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error { continue } - add(acc, "usage_user", 100*(cts.User-lastCts.User)/totalDelta, tags) - add(acc, "usage_system", 100*(cts.System-lastCts.System)/totalDelta, tags) - add(acc, "usage_idle", 100*(cts.Idle-lastCts.Idle)/totalDelta, tags) - add(acc, "usage_nice", 100*(cts.Nice-lastCts.Nice)/totalDelta, tags) - add(acc, "usage_iowait", 100*(cts.Iowait-lastCts.Iowait)/totalDelta, tags) - add(acc, "usage_irq", 100*(cts.Irq-lastCts.Irq)/totalDelta, tags) - add(acc, "usage_softirq", 100*(cts.Softirq-lastCts.Softirq)/totalDelta, tags) - add(acc, "usage_steal", 100*(cts.Steal-lastCts.Steal)/totalDelta, tags) - add(acc, "usage_guest", 100*(cts.Guest-lastCts.Guest)/totalDelta, tags) - add(acc, "usage_guest_nice", 100*(cts.GuestNice-lastCts.GuestNice)/totalDelta, tags) - + fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta + fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta + fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta + fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta + fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta + fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta + 
fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta + fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta + fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta + fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta + acc.AddFields("cpu", fields, tags, now) } s.lastStats = times diff --git a/plugins/system/cpu_test.go b/plugins/system/cpu_test.go new file mode 100644 index 000000000..843d166cb --- /dev/null +++ b/plugins/system/cpu_test.go @@ -0,0 +1,106 @@ +package system + +import ( + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/shirou/gopsutil/cpu" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCPUStats(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + cts := cpu.CPUTimesStat{ + CPU: "cpu0", + User: 3.1, + System: 8.2, + Idle: 80.1, + Nice: 1.3, + Iowait: 0.2, + Irq: 0.1, + Softirq: 0.11, + Steal: 0.0511, + Guest: 8.1, + GuestNice: 0.324, + } + + cts2 := cpu.CPUTimesStat{ + CPU: "cpu0", + User: 11.4, // increased by 8.3 + System: 10.9, // increased by 2.7 + Idle: 158.8699, // increased by 78.7699 (for total increase of 100) + Nice: 2.5, // increased by 1.2 + Iowait: 0.7, // increased by 0.5 + Irq: 1.2, // increased by 1.1 + Softirq: 0.31, // increased by 0.2 + Steal: 0.2812, // increased by 0.0001 + Guest: 12.9, // increased by 4.8 + GuestNice: 2.524, // increased by 2.2 + } + + mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil) + + cs := NewCPUStats(&mps) + + cputags := map[string]string{ + "cpu": "cpu0", + } + + err := cs.Gather(&acc) + require.NoError(t, err) + numCPUPoints := len(acc.Points) + + expectedCPUPoints := 10 + assert.Equal(t, expectedCPUPoints, numCPUPoints) + + // Computed values are checked with delta > 0 becasue of floating point arithmatic + // imprecision + assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags) + 
assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags) + + mps2 := MockPS{} + mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil) + cs.ps = &mps2 + + // Should have added cpu percentages too + err = cs.Gather(&acc) + require.NoError(t, err) + + numCPUPoints = len(acc.Points) - numCPUPoints + expectedCPUPoints = 20 + assert.Equal(t, expectedCPUPoints, numCPUPoints) + + assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags) + + assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 
0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags) +} diff --git a/plugins/system/disk.go b/plugins/system/disk.go index 2e202f8d2..410044d2c 100644 --- a/plugins/system/disk.go +++ b/plugins/system/disk.go @@ -50,12 +50,15 @@ func (s *DiskStats) Gather(acc plugins.Accumulator) error { "path": du.Path, "fstype": du.Fstype, } - acc.Add("total", du.Total, tags) - acc.Add("free", du.Free, tags) - acc.Add("used", du.Total-du.Free, tags) - acc.Add("inodes_total", du.InodesTotal, tags) - acc.Add("inodes_free", du.InodesFree, tags) - acc.Add("inodes_used", du.InodesTotal-du.InodesFree, tags) + fields := map[string]interface{}{ + "total": du.Total, + "free": du.Free, + "used": du.Total - du.Free, + "inodes_total": du.InodesTotal, + "inodes_free": du.InodesFree, + "inodes_used": du.InodesTotal - du.InodesFree, + } + acc.AddFields("disk", fields, tags) } return nil @@ -115,13 +118,16 @@ func (s *DiskIOStats) Gather(acc plugins.Accumulator) error { } } - acc.Add("reads", io.ReadCount, tags) - acc.Add("writes", io.WriteCount, tags) - acc.Add("read_bytes", io.ReadBytes, tags) - acc.Add("write_bytes", io.WriteBytes, tags) - acc.Add("read_time", io.ReadTime, tags) - acc.Add("write_time", io.WriteTime, tags) - acc.Add("io_time", io.IoTime, tags) + fields := map[string]interface{}{ + "reads": io.ReadCount, + "writes": io.WriteCount, + "read_bytes": io.ReadBytes, + "write_bytes": io.WriteBytes, + "read_time": io.ReadTime, + "write_time": io.WriteTime, + "io_time": io.IoTime, + } + acc.AddFields("diskio", fields, tags) } return nil @@ -132,7 +138,7 @@ func init() { return &DiskStats{ps: &systemPS{}} }) - plugins.Add("io", func() plugins.Plugin { 
+ plugins.Add("diskio", func() plugins.Plugin { return &DiskIOStats{ps: &systemPS{}} }) } diff --git a/plugins/system/disk_test.go b/plugins/system/disk_test.go new file mode 100644 index 000000000..abeba736b --- /dev/null +++ b/plugins/system/disk_test.go @@ -0,0 +1,161 @@ +package system + +import ( + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/shirou/gopsutil/disk" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDiskStats(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + var err error + + du := []*disk.DiskUsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, + InodesTotal: 1234, + InodesFree: 234, + }, + { + Path: "/home", + Fstype: "ext4", + Total: 256, + Free: 46, + InodesTotal: 2468, + InodesFree: 468, + }, + } + + mps.On("DiskUsage").Return(du, nil) + + err = (&DiskStats{ps: &mps}).Gather(&acc) + require.NoError(t, err) + + numDiskPoints := len(acc.Points) + expectedAllDiskPoints := 12 + assert.Equal(t, expectedAllDiskPoints, numDiskPoints) + + tags1 := map[string]string{ + "path": "/", + "fstype": "ext4", + } + tags2 := map[string]string{ + "path": "/home", + "fstype": "ext4", + } + + assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1)) + assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1)) + assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1)) + assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1)) + assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1)) + assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1)) + assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2)) + assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2)) + assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2)) + assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2)) + assert.True(t, 
acc.CheckTaggedValue("inodes_free", uint64(468), tags2)) + assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2)) + + // We expect 6 more DiskPoints to show up with an explicit match on "/" + // and /home not matching the /dev in Mountpoints + err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc) + assert.Equal(t, expectedAllDiskPoints+6, len(acc.Points)) + + // We should see all the diskpoints as Mountpoints includes both + // / and /home + err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc) + assert.Equal(t, 2*expectedAllDiskPoints+6, len(acc.Points)) + +} + +func TestDiskIOStats(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + var err error + + diskio1 := disk.DiskIOCountersStat{ + + ReadCount: 888, + WriteCount: 5341, + ReadBytes: 100000, + WriteBytes: 200000, + ReadTime: 7123, + WriteTime: 9087, + Name: "sda1", + IoTime: 123552, + SerialNumber: "ab-123-ad", + } + diskio2 := disk.DiskIOCountersStat{ + ReadCount: 444, + WriteCount: 2341, + ReadBytes: 200000, + WriteBytes: 400000, + ReadTime: 3123, + WriteTime: 6087, + Name: "sdb1", + IoTime: 246552, + SerialNumber: "bb-123-ad", + } + + mps.On("DiskIO").Return( + map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, + nil) + + err = (&DiskIOStats{ps: &mps}).Gather(&acc) + require.NoError(t, err) + + numDiskIOPoints := len(acc.Points) + expectedAllDiskIOPoints := 14 + assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) + + dtags1 := map[string]string{ + "name": "sda1", + "serial": "ab-123-ad", + } + dtags2 := map[string]string{ + "name": "sdb1", + "serial": "bb-123-ad", + } + + assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) + assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) + assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) + assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) + 
assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) + assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) + assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) + assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) + assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2)) + assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) + assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) + assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) + assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) + assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) + + // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" + // and serial should be missing from the tags with SkipSerialNumber set + err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) + assert.Equal(t, expectedAllDiskIOPoints+7, len(acc.Points)) + + dtags3 := map[string]string{ + "name": "sdb1", + } + + assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) + assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) + assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) + assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) + assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) + assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) + assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) +} diff --git a/plugins/system/docker.go b/plugins/system/docker.go index 94f8ad059..2d6146a59 100644 --- a/plugins/system/docker.go +++ b/plugins/system/docker.go @@ -36,44 +36,47 @@ func (s *DockerStats) Gather(acc plugins.Accumulator) error { cts := cont.CPU - acc.Add("user", cts.User, tags) - acc.Add("system", cts.System, tags) - acc.Add("idle", cts.Idle, tags) - 
acc.Add("nice", cts.Nice, tags) - acc.Add("iowait", cts.Iowait, tags) - acc.Add("irq", cts.Irq, tags) - acc.Add("softirq", cts.Softirq, tags) - acc.Add("steal", cts.Steal, tags) - acc.Add("guest", cts.Guest, tags) - acc.Add("guest_nice", cts.GuestNice, tags) + fields := map[string]interface{}{ + "user": cts.User, + "system": cts.System, + "idle": cts.Idle, + "nice": cts.Nice, + "iowait": cts.Iowait, + "irq": cts.Irq, + "softirq": cts.Softirq, + "steal": cts.Steal, + "guest": cts.Guest, + "guest_nice": cts.GuestNice, - acc.Add("cache", cont.Mem.Cache, tags) - acc.Add("rss", cont.Mem.RSS, tags) - acc.Add("rss_huge", cont.Mem.RSSHuge, tags) - acc.Add("mapped_file", cont.Mem.MappedFile, tags) - acc.Add("swap_in", cont.Mem.Pgpgin, tags) - acc.Add("swap_out", cont.Mem.Pgpgout, tags) - acc.Add("page_fault", cont.Mem.Pgfault, tags) - acc.Add("page_major_fault", cont.Mem.Pgmajfault, tags) - acc.Add("inactive_anon", cont.Mem.InactiveAnon, tags) - acc.Add("active_anon", cont.Mem.ActiveAnon, tags) - acc.Add("inactive_file", cont.Mem.InactiveFile, tags) - acc.Add("active_file", cont.Mem.ActiveFile, tags) - acc.Add("unevictable", cont.Mem.Unevictable, tags) - acc.Add("memory_limit", cont.Mem.HierarchicalMemoryLimit, tags) - acc.Add("total_cache", cont.Mem.TotalCache, tags) - acc.Add("total_rss", cont.Mem.TotalRSS, tags) - acc.Add("total_rss_huge", cont.Mem.TotalRSSHuge, tags) - acc.Add("total_mapped_file", cont.Mem.TotalMappedFile, tags) - acc.Add("total_swap_in", cont.Mem.TotalPgpgIn, tags) - acc.Add("total_swap_out", cont.Mem.TotalPgpgOut, tags) - acc.Add("total_page_fault", cont.Mem.TotalPgFault, tags) - acc.Add("total_page_major_fault", cont.Mem.TotalPgMajFault, tags) - acc.Add("total_inactive_anon", cont.Mem.TotalInactiveAnon, tags) - acc.Add("total_active_anon", cont.Mem.TotalActiveAnon, tags) - acc.Add("total_inactive_file", cont.Mem.TotalInactiveFile, tags) - acc.Add("total_active_file", cont.Mem.TotalActiveFile, tags) - acc.Add("total_unevictable", 
cont.Mem.TotalUnevictable, tags) + "cache": cont.Mem.Cache, + "rss": cont.Mem.RSS, + "rss_huge": cont.Mem.RSSHuge, + "mapped_file": cont.Mem.MappedFile, + "swap_in": cont.Mem.Pgpgin, + "swap_out": cont.Mem.Pgpgout, + "page_fault": cont.Mem.Pgfault, + "page_major_fault": cont.Mem.Pgmajfault, + "inactive_anon": cont.Mem.InactiveAnon, + "active_anon": cont.Mem.ActiveAnon, + "inactive_file": cont.Mem.InactiveFile, + "active_file": cont.Mem.ActiveFile, + "unevictable": cont.Mem.Unevictable, + "memory_limit": cont.Mem.HierarchicalMemoryLimit, + "total_cache": cont.Mem.TotalCache, + "total_rss": cont.Mem.TotalRSS, + "total_rss_huge": cont.Mem.TotalRSSHuge, + "total_mapped_file": cont.Mem.TotalMappedFile, + "total_swap_in": cont.Mem.TotalPgpgIn, + "total_swap_out": cont.Mem.TotalPgpgOut, + "total_page_fault": cont.Mem.TotalPgFault, + "total_page_major_fault": cont.Mem.TotalPgMajFault, + "total_inactive_anon": cont.Mem.TotalInactiveAnon, + "total_active_anon": cont.Mem.TotalActiveAnon, + "total_inactive_file": cont.Mem.TotalInactiveFile, + "total_active_file": cont.Mem.TotalActiveFile, + "total_unevictable": cont.Mem.TotalUnevictable, + } + acc.AddFields("docker", fields, tags) } return nil diff --git a/plugins/system/memory.go b/plugins/system/memory.go index 11e7afbb1..23ce94608 100644 --- a/plugins/system/memory.go +++ b/plugins/system/memory.go @@ -22,18 +22,17 @@ func (s *MemStats) Gather(acc plugins.Accumulator) error { return fmt.Errorf("error getting virtual memory info: %s", err) } - vmtags := map[string]string(nil) - - acc.Add("total", vm.Total, vmtags) - acc.Add("available", vm.Available, vmtags) - acc.Add("used", vm.Used, vmtags) - acc.Add("free", vm.Free, vmtags) - acc.Add("cached", vm.Cached, vmtags) - acc.Add("buffered", vm.Buffers, vmtags) - acc.Add("used_percent", 100*float64(vm.Used)/float64(vm.Total), vmtags) - acc.Add("available_percent", - 100*float64(vm.Available)/float64(vm.Total), - vmtags) + fields := map[string]interface{}{ + "total": vm.Total, + 
"available": vm.Available, + "used": vm.Used, + "free": vm.Free, + "cached": vm.Cached, + "buffered": vm.Buffers, + "used_percent": 100 * float64(vm.Used) / float64(vm.Total), + "available_percent": 100 * float64(vm.Available) / float64(vm.Total), + } + acc.AddFields("mem", fields, nil) return nil } @@ -54,14 +53,15 @@ func (s *SwapStats) Gather(acc plugins.Accumulator) error { return fmt.Errorf("error getting swap memory info: %s", err) } - swaptags := map[string]string(nil) - - acc.Add("total", swap.Total, swaptags) - acc.Add("used", swap.Used, swaptags) - acc.Add("free", swap.Free, swaptags) - acc.Add("used_percent", swap.UsedPercent, swaptags) - acc.Add("in", swap.Sin, swaptags) - acc.Add("out", swap.Sout, swaptags) + fields := map[string]interface{}{ + "total": swap.Total, + "used": swap.Used, + "free": swap.Free, + "used_percent": swap.UsedPercent, + "in": swap.Sin, + "out": swap.Sout, + } + acc.AddFields("swap", fields, nil) return nil } diff --git a/plugins/system/memory_test.go b/plugins/system/memory_test.go new file mode 100644 index 000000000..4b97501a9 --- /dev/null +++ b/plugins/system/memory_test.go @@ -0,0 +1,73 @@ +package system + +import ( + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/shirou/gopsutil/mem" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMemStats(t *testing.T) { + var mps MockPS + var err error + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + vms := &mem.VirtualMemoryStat{ + Total: 12400, + Available: 7600, + Used: 5000, + Free: 1235, + // Active: 8134, + // Inactive: 1124, + // Buffers: 771, + // Cached: 4312, + // Wired: 134, + // Shared: 2142, + } + + mps.On("VMStat").Return(vms, nil) + + sms := &mem.SwapMemoryStat{ + Total: 8123, + Used: 1232, + Free: 6412, + UsedPercent: 12.2, + Sin: 7, + Sout: 830, + } + + mps.On("SwapStat").Return(sms, nil) + + err = (&MemStats{&mps}).Gather(&acc) + require.NoError(t, err) + + vmtags := 
map[string]string(nil) + + assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags)) + assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags)) + assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags)) + assert.True(t, acc.CheckTaggedValue("available_percent", + float64(7600)/float64(12400)*100, + vmtags)) + assert.True(t, acc.CheckTaggedValue("used_percent", + float64(5000)/float64(12400)*100, + vmtags)) + assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags)) + + acc.Points = nil + + err = (&SwapStats{&mps}).Gather(&acc) + require.NoError(t, err) + + swaptags := map[string]string(nil) + + assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags)) + assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags)) + assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags)) + assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags)) + assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags)) + assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags)) +} diff --git a/plugins/system/net.go b/plugins/system/net.go index 9dbcc4577..23f856d6d 100644 --- a/plugins/system/net.go +++ b/plugins/system/net.go @@ -70,14 +70,17 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error { "interface": io.Name, } - acc.Add("bytes_sent", io.BytesSent, tags) - acc.Add("bytes_recv", io.BytesRecv, tags) - acc.Add("packets_sent", io.PacketsSent, tags) - acc.Add("packets_recv", io.PacketsRecv, tags) - acc.Add("err_in", io.Errin, tags) - acc.Add("err_out", io.Errout, tags) - acc.Add("drop_in", io.Dropin, tags) - acc.Add("drop_out", io.Dropout, tags) + fields := map[string]interface{}{ + "bytes_sent": io.BytesSent, + "bytes_recv": io.BytesRecv, + "packets_sent": io.PacketsSent, + "packets_recv": io.PacketsRecv, + "err_in": io.Errin, + "err_out": io.Errout, + "drop_in": io.Dropin, + "drop_out": io.Dropout, + } + acc.AddFields("net", fields, 
tags) } // Get system wide stats for different network protocols diff --git a/plugins/system/net_test.go b/plugins/system/net_test.go new file mode 100644 index 000000000..042b6a2fb --- /dev/null +++ b/plugins/system/net_test.go @@ -0,0 +1,88 @@ +package system + +import ( + "syscall" + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/shirou/gopsutil/net" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNetStats(t *testing.T) { + var mps MockPS + var err error + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + netio := net.NetIOCountersStat{ + Name: "eth0", + BytesSent: 1123, + BytesRecv: 8734422, + PacketsSent: 781, + PacketsRecv: 23456, + Errin: 832, + Errout: 8, + Dropin: 7, + Dropout: 1, + } + + mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil) + + netprotos := []net.NetProtoCountersStat{ + net.NetProtoCountersStat{ + Protocol: "Udp", + Stats: map[string]int64{ + "InDatagrams": 4655, + "NoPorts": 892592, + }, + }, + } + mps.On("NetProto").Return(netprotos, nil) + + netstats := []net.NetConnectionStat{ + net.NetConnectionStat{ + Type: syscall.SOCK_DGRAM, + }, + net.NetConnectionStat{ + Status: "ESTABLISHED", + }, + net.NetConnectionStat{ + Status: "ESTABLISHED", + }, + net.NetConnectionStat{ + Status: "CLOSE", + }, + } + + mps.On("NetConnections").Return(netstats, nil) + + err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc) + require.NoError(t, err) + + ntags := map[string]string{ + "interface": "eth0", + } + + assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("err_out", 
uint64(8), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags)) + assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags)) + assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592))) + assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655))) + + acc.Points = nil + + err = (&NetStats{&mps}).Gather(&acc) + require.NoError(t, err) + netstattags := map[string]string(nil) + + assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags)) + assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags)) + assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags)) +} diff --git a/plugins/system/netstat.go b/plugins/system/netstat.go index 9fe512ddd..bd28971bc 100644 --- a/plugins/system/netstat.go +++ b/plugins/system/netstat.go @@ -42,19 +42,23 @@ func (s *NetStats) Gather(acc plugins.Accumulator) error { } counts[netcon.Status] = c + 1 } - acc.Add("tcp_established", counts["ESTABLISHED"], tags) - acc.Add("tcp_syn_sent", counts["SYN_SENT"], tags) - acc.Add("tcp_syn_recv", counts["SYN_RECV"], tags) - acc.Add("tcp_fin_wait1", counts["FIN_WAIT1"], tags) - acc.Add("tcp_fin_wait2", counts["FIN_WAIT2"], tags) - acc.Add("tcp_time_wait", counts["TIME_WAIT"], tags) - acc.Add("tcp_close", counts["CLOSE"], tags) - acc.Add("tcp_close_wait", counts["CLOSE_WAIT"], tags) - acc.Add("tcp_last_ack", counts["LAST_ACK"], tags) - acc.Add("tcp_listen", counts["LISTEN"], tags) - acc.Add("tcp_closing", counts["CLOSING"], tags) - acc.Add("tcp_none", counts["NONE"], tags) - acc.Add("udp_socket", counts["UDP"], tags) + + fields := map[string]interface{}{ + "tcp_established": counts["ESTABLISHED"], + "tcp_syn_sent": counts["SYN_SENT"], + "tcp_syn_recv": counts["SYN_RECV"], + "tcp_fin_wait1": counts["FIN_WAIT1"], + "tcp_fin_wait2": counts["FIN_WAIT2"], + "tcp_time_wait": counts["TIME_WAIT"], + "tcp_close": counts["CLOSE"], + "tcp_close_wait": counts["CLOSE_WAIT"], + "tcp_last_ack": 
counts["LAST_ACK"], + "tcp_listen": counts["LISTEN"], + "tcp_closing": counts["CLOSING"], + "tcp_none": counts["NONE"], + "udp_socket": counts["UDP"], + } + acc.AddFields("netstat", fields, tags) return nil } diff --git a/plugins/system/ps.go b/plugins/system/ps.go index d0c35c62c..0b7a38527 100644 --- a/plugins/system/ps.go +++ b/plugins/system/ps.go @@ -1,12 +1,16 @@ package system import ( + "fmt" gonet "net" "os" + "reflect" "strings" + "testing" "github.com/influxdb/telegraf/internal" "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/testutil" dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" @@ -14,6 +18,8 @@ import ( "github.com/shirou/gopsutil/docker" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" + + "github.com/stretchr/testify/assert" ) type DockerContainerStat struct { @@ -166,3 +172,49 @@ func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) { return stats, nil } + +// Asserts that a given accumulator contains a measurement of type float64 with +// specific tags within a certain distance of a given expected value. Asserts a failure +// if the measurement is of the wrong type, or if no matching measurements are found +// +// Parameters: +// t *testing.T : Testing object to use +// acc testutil.Accumulator: Accumulator to examine +// measurement string : Name of the measurement to examine +// expectedValue float64 : Value to search for within the measurement +// delta float64 : Maximum acceptable distance of an accumulated value +// from the expectedValue parameter. Useful when +// floating-point arithmetic imprecision makes looking +// for an exact match impractical +// tags map[string]string : Tag set the found measurement must have. Set to nil to +// ignore the tag set. 
+func assertContainsTaggedFloat( + t *testing.T, + acc *testutil.Accumulator, + measurement string, + expectedValue float64, + delta float64, + tags map[string]string, +) { + var actualValue float64 + for _, pt := range acc.Points { + if pt.Measurement == measurement { + if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) { + if value, ok := pt.Fields["value"].(float64); ok { + actualValue = value + if (value >= expectedValue-delta) && (value <= expectedValue+delta) { + // Found the point, return without failing + return + } + } else { + assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", + measurement)) + } + + } + } + } + msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", + measurement, delta, expectedValue, actualValue) + assert.Fail(t, msg) +} diff --git a/plugins/system/system.go b/plugins/system/system.go index 4481ac0a3..82d4f4f24 100644 --- a/plugins/system/system.go +++ b/plugins/system/system.go @@ -37,11 +37,14 @@ func (_ *SystemStats) Gather(acc plugins.Accumulator) error { return err } - acc.Add("load1", loadavg.Load1, nil) - acc.Add("load5", loadavg.Load5, nil) - acc.Add("load15", loadavg.Load15, nil) - acc.Add("uptime", float64(hostinfo.Uptime), nil) - acc.Add("uptime_format", format_uptime(hostinfo.Uptime), nil) + fields := map[string]interface{}{ + "load1": loadavg.Load1, + "load5": loadavg.Load5, + "load15": loadavg.Load15, + "uptime": float64(hostinfo.Uptime), + "uptime_format": format_uptime(hostinfo.Uptime), + } + acc.AddFields("system", fields, nil) return nil } diff --git a/plugins/system/system_test.go b/plugins/system/system_test.go deleted file mode 100644 index fca1d2c35..000000000 --- a/plugins/system/system_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package system - -import ( - "fmt" - "reflect" - "syscall" - "testing" - - "github.com/influxdb/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/disk" - 
"github.com/shirou/gopsutil/mem" - "github.com/shirou/gopsutil/net" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSystemStats_GenerateStats(t *testing.T) { - var mps MockPS - - defer mps.AssertExpectations(t) - - var acc testutil.Accumulator - - cts := cpu.CPUTimesStat{ - CPU: "cpu0", - User: 3.1, - System: 8.2, - Idle: 80.1, - Nice: 1.3, - Iowait: 0.2, - Irq: 0.1, - Softirq: 0.11, - Steal: 0.0511, - Guest: 8.1, - GuestNice: 0.324, - } - - cts2 := cpu.CPUTimesStat{ - CPU: "cpu0", - User: 11.4, // increased by 8.3 - System: 10.9, // increased by 2.7 - Idle: 158.8699, // increased by 78.7699 (for total increase of 100) - Nice: 2.5, // increased by 1.2 - Iowait: 0.7, // increased by 0.5 - Irq: 1.2, // increased by 1.1 - Softirq: 0.31, // increased by 0.2 - Steal: 0.2812, // increased by 0.0001 - Guest: 12.9, // increased by 4.8 - GuestNice: 2.524, // increased by 2.2 - } - - mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil) - - du := []*disk.DiskUsageStat{ - { - Path: "/", - Fstype: "ext4", - Total: 128, - Free: 23, - InodesTotal: 1234, - InodesFree: 234, - }, - { - Path: "/home", - Fstype: "ext4", - Total: 256, - Free: 46, - InodesTotal: 2468, - InodesFree: 468, - }, - } - - mps.On("DiskUsage").Return(du, nil) - - diskio1 := disk.DiskIOCountersStat{ - - ReadCount: 888, - WriteCount: 5341, - ReadBytes: 100000, - WriteBytes: 200000, - ReadTime: 7123, - WriteTime: 9087, - Name: "sda1", - IoTime: 123552, - SerialNumber: "ab-123-ad", - } - diskio2 := disk.DiskIOCountersStat{ - ReadCount: 444, - WriteCount: 2341, - ReadBytes: 200000, - WriteBytes: 400000, - ReadTime: 3123, - WriteTime: 6087, - Name: "sdb1", - IoTime: 246552, - SerialNumber: "bb-123-ad", - } - - mps.On("DiskIO").Return(map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, nil) - - netio := net.NetIOCountersStat{ - Name: "eth0", - BytesSent: 1123, - BytesRecv: 8734422, - PacketsSent: 781, - PacketsRecv: 23456, - Errin: 832, - Errout: 8, - 
Dropin: 7, - Dropout: 1, - } - - mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil) - - netprotos := []net.NetProtoCountersStat{ - net.NetProtoCountersStat{ - Protocol: "Udp", - Stats: map[string]int64{ - "InDatagrams": 4655, - "NoPorts": 892592, - }, - }, - } - mps.On("NetProto").Return(netprotos, nil) - - vms := &mem.VirtualMemoryStat{ - Total: 12400, - Available: 7600, - Used: 5000, - Free: 1235, - // Active: 8134, - // Inactive: 1124, - // Buffers: 771, - // Cached: 4312, - // Wired: 134, - // Shared: 2142, - } - - mps.On("VMStat").Return(vms, nil) - - sms := &mem.SwapMemoryStat{ - Total: 8123, - Used: 1232, - Free: 6412, - UsedPercent: 12.2, - Sin: 7, - Sout: 830, - } - - mps.On("SwapStat").Return(sms, nil) - - netstats := []net.NetConnectionStat{ - net.NetConnectionStat{ - Type: syscall.SOCK_DGRAM, - }, - net.NetConnectionStat{ - Status: "ESTABLISHED", - }, - net.NetConnectionStat{ - Status: "ESTABLISHED", - }, - net.NetConnectionStat{ - Status: "CLOSE", - }, - } - - mps.On("NetConnections").Return(netstats, nil) - - cs := NewCPUStats(&mps) - - cputags := map[string]string{ - "cpu": "cpu0", - } - - preCPUPoints := len(acc.Points) - err := cs.Gather(&acc) - require.NoError(t, err) - numCPUPoints := len(acc.Points) - preCPUPoints - - expectedCPUPoints := 10 - assert.Equal(t, expectedCPUPoints, numCPUPoints) - - // Computed values are checked with delta > 0 becasue of floating point arithmatic - // imprecision - assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags) - 
assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags) - - mps2 := MockPS{} - mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil) - cs.ps = &mps2 - - // Should have added cpu percentages too - err = cs.Gather(&acc) - require.NoError(t, err) - - numCPUPoints = len(acc.Points) - (preCPUPoints + numCPUPoints) - expectedCPUPoints = 20 - assert.Equal(t, expectedCPUPoints, numCPUPoints) - - assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags) - - preDiskPoints := len(acc.Points) - - err = (&DiskStats{ps: 
&mps}).Gather(&acc) - require.NoError(t, err) - - numDiskPoints := len(acc.Points) - preDiskPoints - expectedAllDiskPoints := 12 - assert.Equal(t, expectedAllDiskPoints, numDiskPoints) - - tags1 := map[string]string{ - "path": "/", - "fstype": "ext4", - } - tags2 := map[string]string{ - "path": "/home", - "fstype": "ext4", - } - - assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1)) - assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1)) - assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1)) - assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2)) - assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2)) - assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2)) - - // We expect 6 more DiskPoints to show up with an explicit match on "/" - // and /home not matching the /dev in Mountpoints - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, preDiskPoints+expectedAllDiskPoints+6, len(acc.Points)) - - // We should see all the diskpoints as Mountpoints includes both - // / and /home - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, preDiskPoints+2*expectedAllDiskPoints+6, len(acc.Points)) - - err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc) - require.NoError(t, err) - - ntags := map[string]string{ - "interface": "eth0", - } - - assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", 
uint64(8734422), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags)) - assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592))) - assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655))) - - preDiskIOPoints := len(acc.Points) - - err = (&DiskIOStats{ps: &mps}).Gather(&acc) - require.NoError(t, err) - - numDiskIOPoints := len(acc.Points) - preDiskIOPoints - expectedAllDiskIOPoints := 14 - assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) - - dtags1 := map[string]string{ - "name": "sda1", - "serial": "ab-123-ad", - } - dtags2 := map[string]string{ - "name": "sdb1", - "serial": "bb-123-ad", - } - - assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_time", 
uint64(6087), dtags2)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) - - // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" - // and serial should be missing from the tags with SkipSerialNumber set - err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) - assert.Equal(t, preDiskIOPoints+expectedAllDiskIOPoints+7, len(acc.Points)) - - dtags3 := map[string]string{ - "name": "sdb1", - } - - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) - - err = (&MemStats{&mps}).Gather(&acc) - require.NoError(t, err) - - vmtags := map[string]string(nil) - - assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags)) - assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags)) - assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags)) - assert.True(t, acc.CheckTaggedValue("available_percent", - float64(7600)/float64(12400)*100, - vmtags)) - assert.True(t, acc.CheckTaggedValue("used_percent", - float64(5000)/float64(12400)*100, - vmtags)) - assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags)) - - acc.Points = nil - - err = (&SwapStats{&mps}).Gather(&acc) - require.NoError(t, err) - - swaptags := map[string]string(nil) - - assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags)) - assert.NoError(t, 
acc.ValidateTaggedValue("free", uint64(6412), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags)) - - acc.Points = nil - - err = (&NetStats{&mps}).Gather(&acc) - require.NoError(t, err) - netstattags := map[string]string(nil) - - assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags)) - -} - -// Asserts that a given accumulator contains a measurment of type float64 with -// specific tags within a certain distance of a given expected value. Asserts a failure -// if the measurement is of the wrong type, or if no matching measurements are found -// -// Paramaters: -// t *testing.T : Testing object to use -// acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine -// expectedValue float64 : Value to search for within the measurement -// delta float64 : Maximum acceptable distance of an accumulated value -// from the expectedValue parameter. Useful when -// floating-point arithmatic imprecision makes looking -// for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. 
-func assertContainsTaggedFloat( - t *testing.T, - acc *testutil.Accumulator, - measurement string, - expectedValue float64, - delta float64, - tags map[string]string, -) { - var actualValue float64 - for _, pt := range acc.Points { - if pt.Measurement == measurement { - if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) { - if value, ok := pt.Fields["value"].(float64); ok { - actualValue = value - if (value >= expectedValue-delta) && (value <= expectedValue+delta) { - // Found the point, return without failing - return - } - } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) - } - - } - } - } - msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", - measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) -} From 97a66b73cfa54f04f508d1da43257ea83e55f13c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 13:10:55 -0600 Subject: [PATCH 002/103] Updating aerospike & apache plugins for 0.3.0 --- plugins/aerospike/aerospike.go | 28 +++++++++++++++---------- plugins/apache/apache.go | 38 +++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/plugins/aerospike/aerospike.go b/plugins/aerospike/aerospike.go index cf372aeb4..9d920646d 100644 --- a/plugins/aerospike/aerospike.go +++ b/plugins/aerospike/aerospike.go @@ -247,26 +247,32 @@ func get(key []byte, host string) (map[string]string, error) { return data, err } -func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) { +func readAerospikeStats( + stats map[string]string, + acc plugins.Accumulator, + host string, + namespace string, +) { + fields := make(map[string]interface{}) + tags := map[string]string{ + "aerospike_host": host, + "namespace": "_service", + } + + if namespace != "" { + tags["namespace"] = namespace + } for key, value := range stats { - tags := map[string]string{ - "aerospike_host": 
host, - "namespace": "_service", - } - - if namespace != "" { - tags["namespace"] = namespace - } - // We are going to ignore all string based keys val, err := strconv.ParseInt(value, 10, 64) if err == nil { if strings.Contains(key, "-") { key = strings.Replace(key, "-", "_", -1) } - acc.Add(key, val, tags) + fields[key] = val } } + acc.AddFields("aerospike", fields, tags) } func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) { diff --git a/plugins/apache/apache.go b/plugins/apache/apache.go index 3cebecb22..958a0296e 100644 --- a/plugins/apache/apache.go +++ b/plugins/apache/apache.go @@ -72,32 +72,33 @@ func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { tags := getTags(addr) sc := bufio.NewScanner(resp.Body) + fields := make(map[string]interface{}) for sc.Scan() { line := sc.Text() if strings.Contains(line, ":") { - parts := strings.SplitN(line, ":", 2) key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1]) switch key { - case "Scoreboard": - n.gatherScores(part, acc, tags) + for field, value := range n.gatherScores(part) { + fields[field] = value + } default: value, err := strconv.ParseFloat(part, 64) if err != nil { continue } - acc.Add(key, value, tags) + fields[key] = value } } } + acc.AddFields("apache", fields, tags) return nil } -func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) { - +func (n *Apache) gatherScores(data string) map[string]interface{} { var waiting, open int = 0, 0 var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 @@ -129,17 +130,20 @@ func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[str } } - acc.Add("scboard_waiting", float64(waiting), tags) - acc.Add("scboard_starting", float64(S), tags) - acc.Add("scboard_reading", float64(R), tags) - acc.Add("scboard_sending", float64(W), tags) - acc.Add("scboard_keepalive", float64(K), tags) - acc.Add("scboard_dnslookup", float64(D), 
tags) - acc.Add("scboard_closing", float64(C), tags) - acc.Add("scboard_logging", float64(L), tags) - acc.Add("scboard_finishing", float64(G), tags) - acc.Add("scboard_idle_cleanup", float64(I), tags) - acc.Add("scboard_open", float64(open), tags) + fields := map[string]interface{}{ + "scboard_waiting": float64(waiting), + "scboard_starting": float64(S), + "scboard_reading": float64(R), + "scboard_sending": float64(W), + "scboard_keepalive": float64(K), + "scboard_dnslookup": float64(D), + "scboard_closing": float64(C), + "scboard_logging": float64(L), + "scboard_finishing": float64(G), + "scboard_idle_cleanup": float64(I), + "scboard_open": float64(open), + } + return fields } // Get tag(s) for the apache plugin From 3be111a160e32399d9997c8eb073e101dcc7d844 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 16:15:51 -0600 Subject: [PATCH 003/103] Breakout JSON flattening into internal package, exec & elasticsearch aggregation --- internal/internal.go | 34 +++++++++++++++++++++++++ plugins/bcache/bcache.go | 11 +++++--- plugins/disque/disque.go | 9 ++++--- plugins/elasticsearch/README.md | 7 +++--- plugins/elasticsearch/elasticsearch.go | 33 ++++++++---------------- plugins/exec/exec.go | 35 +++++++++++++++----------- plugins/system/system.go | 9 +------ 7 files changed, 81 insertions(+), 57 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 45164682b..93c467808 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -3,6 +3,7 @@ package internal import ( "bufio" "errors" + "fmt" "os" "strings" "time" @@ -27,6 +28,39 @@ func (d *Duration) UnmarshalTOML(b []byte) error { var NotImplementedError = errors.New("not implemented yet") +type JSONFlattener struct { + Fields map[string]interface{} +} + +// FlattenJSON flattens nested maps/interfaces into a fields map +func (f *JSONFlattener) FlattenJSON( + fieldname string, + v interface{}, +) error { + if f.Fields == nil { + f.Fields = make(map[string]interface{}) + } + 
fieldname = strings.Trim(fieldname, "_") + switch t := v.(type) { + case map[string]interface{}: + for k, v := range t { + err := f.FlattenJSON(fieldname+"_"+k+"_", v) + if err != nil { + return err + } + } + case float64: + f.Fields[fieldname] = t + case bool, string, []interface{}: + // ignored types + return nil + default: + return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)", + t, t, fieldname) + } + return nil +} + // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { diff --git a/plugins/bcache/bcache.go b/plugins/bcache/bcache.go index 76e638ea4..92cea3d63 100644 --- a/plugins/bcache/bcache.go +++ b/plugins/bcache/bcache.go @@ -81,7 +81,9 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { } rawValue := strings.TrimSpace(string(file)) value := prettyToBytes(rawValue) - acc.Add("dirty_data", value, tags) + + fields := make(map[string]interface{}) + fields["dirty_data"] = value for _, path := range metrics { key := filepath.Base(path) @@ -92,12 +94,13 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { } if key == "bypassed" { value := prettyToBytes(rawValue) - acc.Add(key, value, tags) + fields[key] = value } else { value, _ := strconv.ParseUint(rawValue, 10, 64) - acc.Add(key, value, tags) + fields[key] = value } } + acc.AddFields("bcache", fields, tags) return nil } @@ -117,7 +120,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error { } bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*") if len(bdevs) < 1 { - return errors.New("Can't found any bcache device") + return errors.New("Can't find any bcache device") } for _, bdev := range bdevs { if restrictDevs { diff --git a/plugins/disque/disque.go b/plugins/disque/disque.go index 004aa3c0f..b7b7dd5c1 100644 --- a/plugins/disque/disque.go +++ b/plugins/disque/disque.go @@ -155,6 +155,8 @@ func (g 
*Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { var read int + fields := make(map[string]interface{}) + tags := map[string]string{"host": addr.String()} for read < sz { line, err := r.ReadString('\n') if err != nil { @@ -176,12 +178,11 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { continue } - tags := map[string]string{"host": addr.String()} val := strings.TrimSpace(parts[1]) ival, err := strconv.ParseUint(val, 10, 64) if err == nil { - acc.Add(metric, ival, tags) + fields[metric] = ival continue } @@ -190,9 +191,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { return err } - acc.Add(metric, fval, tags) + fields[metric] = fval } - + acc.AddFields("disque", fields, tags) return nil } diff --git a/plugins/elasticsearch/README.md b/plugins/elasticsearch/README.md index dbc9a3587..03acad034 100644 --- a/plugins/elasticsearch/README.md +++ b/plugins/elasticsearch/README.md @@ -31,8 +31,9 @@ contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`, `initializing_shards`, `unassigned_shards` fields - elasticsearch_cluster_health -contains `status`, `number_of_shards`, `number_of_replicas`, `active_primary_shards`, -`active_shards`, `relocating_shards`, `initializing_shards`, `unassigned_shards` fields +contains `status`, `number_of_shards`, `number_of_replicas`, +`active_primary_shards`, `active_shards`, `relocating_shards`, +`initializing_shards`, `unassigned_shards` fields - elasticsearch_indices #### node measurements: @@ -316,4 +317,4 @@ Transport statistics about sent and received bytes in cluster communication meas - elasticsearch_transport_rx_count value=6 - elasticsearch_transport_rx_size_in_bytes value=1380 - elasticsearch_transport_tx_count value=6 -- elasticsearch_transport_tx_size_in_bytes value=1380 \ No newline at end of file +- elasticsearch_transport_tx_size_in_bytes value=1380 diff --git a/plugins/elasticsearch/elasticsearch.go 
b/plugins/elasticsearch/elasticsearch.go index bfe6f20bb..2266f2243 100644 --- a/plugins/elasticsearch/elasticsearch.go +++ b/plugins/elasticsearch/elasticsearch.go @@ -6,6 +6,7 @@ import ( "net/http" "time" + "github.com/influxdb/telegraf/internal" "github.com/influxdb/telegraf/plugins" ) @@ -141,10 +142,14 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err "breakers": n.Breakers, } + now := time.Now() for p, s := range stats { - if err := e.parseInterface(acc, p, tags, s); err != nil { + f := internal.JSONFlattener{} + err := f.FlattenJSON("", s) + if err != nil { return err } + acc.AddFields("elasticsearch_"+p, f.Fields, tags, now) } } return nil @@ -168,7 +173,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) "unassigned_shards": clusterStats.UnassignedShards, } acc.AddFields( - "cluster_health", + "elasticsearch_cluster_health", clusterFields, map[string]string{"name": clusterStats.ClusterName}, measurementTime, @@ -186,7 +191,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) "unassigned_shards": health.UnassignedShards, } acc.AddFields( - "indices", + "elasticsearch_indices", indexFields, map[string]string{"index": name}, measurementTime, @@ -205,7 +210,8 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error { // NOTE: we are not going to read/discard r.Body under the assumption we'd prefer // to let the underlying transport close the connection and re-establish a new one for // future calls. 
- return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) + return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", + r.StatusCode, http.StatusOK) } if err = json.NewDecoder(r.Body).Decode(v); err != nil { return err @@ -213,25 +219,6 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error { return nil } -func (e *Elasticsearch) parseInterface(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) error { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - if err := e.parseInterface(acc, prefix+"_"+k, tags, v); err != nil { - return err - } - } - case float64: - acc.Add(prefix, t, tags) - case bool, string, []interface{}: - // ignored types - return nil - default: - return fmt.Errorf("elasticsearch: got unexpected type %T with value %v (%s)", t, t, prefix) - } - return nil -} - func init() { plugins.Add("elasticsearch", func() plugins.Plugin { return NewElasticsearch() diff --git a/plugins/exec/exec.go b/plugins/exec/exec.go index d4a42b6c4..1571b6bf4 100644 --- a/plugins/exec/exec.go +++ b/plugins/exec/exec.go @@ -5,13 +5,16 @@ import ( "encoding/json" "errors" "fmt" - "github.com/gonuts/go-shellquote" - "github.com/influxdb/telegraf/plugins" "math" "os/exec" "strings" "sync" "time" + + "github.com/gonuts/go-shellquote" + + "github.com/influxdb/telegraf/internal" + "github.com/influxdb/telegraf/plugins" ) const sampleConfig = ` @@ -136,25 +139,27 @@ func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error { var jsonOut interface{} err = json.Unmarshal(out, &jsonOut) if err != nil { - return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", c.Command, err) + return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", + c.Command, err) } - processResponse(acc, c.Name, map[string]string{}, jsonOut) + f := internal.JSONFlattener{} + err = f.FlattenJSON("", jsonOut) + if err != nil 
{ + return err + } + + var msrmnt_name string + if c.Name == "" { + msrmnt_name = "exec" + } else { + msrmnt_name = "exec_" + c.Name + } + acc.AddFields(msrmnt_name, f.Fields, nil) } return nil } -func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - processResponse(acc, prefix+"_"+k, tags, v) - } - case float64: - acc.Add(prefix, v, tags) - } -} - func init() { plugins.Add("exec", func() plugins.Plugin { return NewExec() diff --git a/plugins/system/system.go b/plugins/system/system.go index 82d4f4f24..1adf6c051 100644 --- a/plugins/system/system.go +++ b/plugins/system/system.go @@ -19,13 +19,6 @@ func (_ *SystemStats) Description() string { func (_ *SystemStats) SampleConfig() string { return "" } -func (_ *SystemStats) add(acc plugins.Accumulator, - name string, val float64, tags map[string]string) { - if val >= 0 { - acc.Add(name, val, tags) - } -} - func (_ *SystemStats) Gather(acc plugins.Accumulator) error { loadavg, err := load.LoadAvg() if err != nil { @@ -41,7 +34,7 @@ func (_ *SystemStats) Gather(acc plugins.Accumulator) error { "load1": loadavg.Load1, "load5": loadavg.Load5, "load15": loadavg.Load15, - "uptime": float64(hostinfo.Uptime), + "uptime": hostinfo.Uptime, "uptime_format": format_uptime(hostinfo.Uptime), } acc.AddFields("system", fields, nil) From 38d6cb97ad9161a108cf2cd00287b4b539104ed7 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 16:45:29 -0600 Subject: [PATCH 004/103] 0.3.0: HAProxy --- plugins/haproxy/haproxy.go | 87 ++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index e5ce6e404..88a74a9ad 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -9,6 +9,7 @@ import ( "net/url" "strconv" "sync" + "time" ) //CSV format: 
https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1 @@ -137,7 +138,8 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { return fmt.Errorf("Unable parse server address '%s': %s", addr, err) } - req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil) + req, err := http.NewRequest("GET", + fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil) if u.User != nil { p, _ := u.User.Password() req.SetBasicAuth(u.User.Username(), p) @@ -152,43 +154,42 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { return fmt.Errorf("Unable to get valid stat result from '%s': %s", addr, err) } - importCsvResult(res.Body, acc, u.Host) - - return nil + return importCsvResult(res.Body, acc, u.Host) } -func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]string, error) { +func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { csv := csv.NewReader(r) result, err := csv.ReadAll() + now := time.Now() for _, row := range result { - + fields := make(map[string]interface{}) + tags := map[string]string{ + "server": host, + "proxy": row[HF_PXNAME], + "sv": row[HF_SVNAME], + } for field, v := range row { - tags := map[string]string{ - "server": host, - "proxy": row[HF_PXNAME], - "sv": row[HF_SVNAME], - } switch field { case HF_QCUR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qcur", ival, tags) + fields["qcur"] = ival } case HF_QMAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qmax", ival, tags) + fields["qmax"] = ival } case HF_SCUR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("scur", ival, tags) + fields["scur"] = ival } case HF_SMAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("smax", ival, tags) + fields["smax"] = ival } case HF_STOT: ival, err := strconv.ParseUint(v, 10, 64) @@ -198,22 +199,22 @@ func importCsvResult(r io.Reader, acc 
plugins.Accumulator, host string) ([][]str case HF_BIN: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("bin", ival, tags) + fields["bin"] = ival } case HF_BOUT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("bout", ival, tags) + fields["bout"] = ival } case HF_DREQ: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("dreq", ival, tags) + fields["dreq"] = ival } case HF_DRESP: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("dresp", ival, tags) + fields["dresp"] = ival } case HF_EREQ: ival, err := strconv.ParseUint(v, 10, 64) @@ -268,94 +269,98 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]str case HF_RATE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rate", ival, tags) + fields["rate"] = ival } case HF_RATE_MAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rate_max", ival, tags) + fields["rate_max"] = ival } case HF_CHECK_DURATION: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("check_duration", ival, tags) + fields["stot"] = ival } case HF_HRSP_1xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.1xx", ival, tags) + fields["http_response.1xx"] = ival } case HF_HRSP_2xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.2xx", ival, tags) + fields["http_response.2xx"] = ival } case HF_HRSP_3xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.3xx", ival, tags) + fields["http_response.3xx"] = ival } case HF_HRSP_4xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.4xx", ival, tags) + fields["http_response.4xx"] = ival } case HF_HRSP_5xx: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("http_response.5xx", ival, tags) + fields["http_response.5xx"] = ival + } + case HF_EREQ: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { 
+ fields["ereq"] = ival } case HF_REQ_RATE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_rate", ival, tags) + fields["eresp"] = ival } - case HF_REQ_RATE_MAX: + case HF_ECON: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_rate_max", ival, tags) + fields["econ"] = ival } - case HF_REQ_TOT: + case HF_WRETR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("req_tot", ival, tags) + fields["wretr"] = ival } case HF_CLI_ABRT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("cli_abort", ival, tags) + fields["wredis"] = ival } case HF_SRV_ABRT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("srv_abort", ival, tags) + fields["req_rate"] = ival } case HF_QTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("qtime", ival, tags) + fields["req_rate_max"] = ival } case HF_CTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ctime", ival, tags) + fields["req_tot"] = ival } case HF_RTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("rtime", ival, tags) + fields["throttle"] = ival } case HF_TTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ttime", ival, tags) + fields["lbtot"] = ival } - } - } + acc.AddFields("haproxy", fields, tags, now) } - return result, err + return err } func init() { From 4798bd9d339aa60a9ea5a10f92fe1b16561782cf Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 16:57:17 -0600 Subject: [PATCH 005/103] 0.3.0 httpjson --- plugins/httpjson/httpjson.go | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/plugins/httpjson/httpjson.go b/plugins/httpjson/httpjson.go index f1d2ef927..e2b44b7a9 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/httpjson/httpjson.go @@ -10,6 +10,7 @@ import ( "strings" "sync" + "github.com/influxdb/telegraf/internal" "github.com/influxdb/telegraf/plugins" ) @@ 
-154,7 +155,19 @@ func (h *HttpJson) gatherServer( delete(jsonOut, tag) } - processResponse(acc, service.Name, tags, jsonOut) + f := internal.JSONFlattener{} + err = f.FlattenJSON("", jsonOut) + if err != nil { + return err + } + + var msrmnt_name string + if service.Name == "" { + msrmnt_name = "httpjson" + } else { + msrmnt_name = "httpjson_" + service.Name + } + acc.AddFields(msrmnt_name, f.Fields, nil) return nil } @@ -209,23 +222,6 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error return string(body), err } -// Flattens the map generated from the JSON object and stores its float values using a -// plugins.Accumulator. It ignores any non-float values. -// Parameters: -// acc: the Accumulator to use -// prefix: What the name of the measurement name should be prefixed by. -// tags: telegraf tags to -func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) { - switch t := v.(type) { - case map[string]interface{}: - for k, v := range t { - processResponse(acc, prefix+"_"+k, tags, v) - } - case float64: - acc.Add(prefix, v, tags) - } -} - func init() { plugins.Add("httpjson", func() plugins.Plugin { return &HttpJson{client: RealHTTPClient{client: &http.Client{}}} From e30e98a4960762ee4d1c65d593e1e9196b648dca Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 17:11:54 -0600 Subject: [PATCH 006/103] 0.3.0: leofs & lustre2 --- plugins/leofs/leofs.go | 5 ++++- plugins/lustre2/lustre2.go | 12 ++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/plugins/leofs/leofs.go b/plugins/leofs/leofs.go index 32e5ee99a..5a1df7c56 100644 --- a/plugins/leofs/leofs.go +++ b/plugins/leofs/leofs.go @@ -197,6 +197,8 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins "node": nodeNameTrimmed, } i := 0 + + fields := make(map[string]interface{}) for scanner.Scan() { key := KeyMapping[serverType][i] val, err := 
retrieveTokenAfterColon(scanner.Text()) @@ -207,9 +209,10 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins if err != nil { return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err) } - acc.Add(key, fVal, tags) + fields[key] = fVal i++ } + acc.AddFields("leofs", fields, tags) return nil } diff --git a/plugins/lustre2/lustre2.go b/plugins/lustre2/lustre2.go index 57217ec06..64af3050b 100644 --- a/plugins/lustre2/lustre2.go +++ b/plugins/lustre2/lustre2.go @@ -149,19 +149,19 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, return err } + fields := make(map[string]interface{}) for _, line := range lines { - fields := strings.Fields(line) - + parts := strings.Fields(line) for _, wanted := range wanted_fields { var data uint64 - if fields[0] == wanted.inProc { + if parts[0] == wanted.inProc { wanted_field := wanted.field // if not set, assume field[1]. Shouldn't be field[0], as // that's a string if wanted_field == 0 { wanted_field = 1 } - data, err = strconv.ParseUint((fields[wanted_field]), 10, 64) + data, err = strconv.ParseUint((parts[wanted_field]), 10, 64) if err != nil { return err } @@ -169,11 +169,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, if wanted.reportAs != "" { report_name = wanted.reportAs } - acc.Add(report_name, data, tags) - + fields[report_name] = data } } } + acc.AddFields("lustre2", fields) } return nil } From ce7b48143a92647b7d9d44fbf50b0a496f30799a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 17:17:59 -0600 Subject: [PATCH 007/103] 0.3.0: mailchimp & memcached --- plugins/lustre2/lustre2.go | 2 +- plugins/mailchimp/mailchimp.go | 61 ++++++++++++++++++---------------- plugins/memcached/memcached.go | 6 ++-- 3 files changed, 37 insertions(+), 32 deletions(-) diff --git a/plugins/lustre2/lustre2.go b/plugins/lustre2/lustre2.go index 64af3050b..29cd06acf 100644 --- a/plugins/lustre2/lustre2.go +++ 
b/plugins/lustre2/lustre2.go @@ -173,7 +173,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, } } } - acc.AddFields("lustre2", fields) + acc.AddFields("lustre2", fields, tags) } return nil } diff --git a/plugins/mailchimp/mailchimp.go b/plugins/mailchimp/mailchimp.go index c1c7027f0..d26d479af 100644 --- a/plugins/mailchimp/mailchimp.go +++ b/plugins/mailchimp/mailchimp.go @@ -75,35 +75,38 @@ func gatherReport(acc plugins.Accumulator, report Report, now time.Time) { tags := make(map[string]string) tags["id"] = report.ID tags["campaign_title"] = report.CampaignTitle - acc.Add("emails_sent", report.EmailsSent, tags, now) - acc.Add("abuse_reports", report.AbuseReports, tags, now) - acc.Add("unsubscribed", report.Unsubscribed, tags, now) - acc.Add("hard_bounces", report.Bounces.HardBounces, tags, now) - acc.Add("soft_bounces", report.Bounces.SoftBounces, tags, now) - acc.Add("syntax_errors", report.Bounces.SyntaxErrors, tags, now) - acc.Add("forwards_count", report.Forwards.ForwardsCount, tags, now) - acc.Add("forwards_opens", report.Forwards.ForwardsOpens, tags, now) - acc.Add("opens_total", report.Opens.OpensTotal, tags, now) - acc.Add("unique_opens", report.Opens.UniqueOpens, tags, now) - acc.Add("open_rate", report.Opens.OpenRate, tags, now) - acc.Add("clicks_total", report.Clicks.ClicksTotal, tags, now) - acc.Add("unique_clicks", report.Clicks.UniqueClicks, tags, now) - acc.Add("unique_subscriber_clicks", report.Clicks.UniqueSubscriberClicks, tags, now) - acc.Add("click_rate", report.Clicks.ClickRate, tags, now) - acc.Add("facebook_recipient_likes", report.FacebookLikes.RecipientLikes, tags, now) - acc.Add("facebook_unique_likes", report.FacebookLikes.UniqueLikes, tags, now) - acc.Add("facebook_likes", report.FacebookLikes.FacebookLikes, tags, now) - acc.Add("industry_type", report.IndustryStats.Type, tags, now) - acc.Add("industry_open_rate", report.IndustryStats.OpenRate, tags, now) - acc.Add("industry_click_rate", 
report.IndustryStats.ClickRate, tags, now) - acc.Add("industry_bounce_rate", report.IndustryStats.BounceRate, tags, now) - acc.Add("industry_unopen_rate", report.IndustryStats.UnopenRate, tags, now) - acc.Add("industry_unsub_rate", report.IndustryStats.UnsubRate, tags, now) - acc.Add("industry_abuse_rate", report.IndustryStats.AbuseRate, tags, now) - acc.Add("list_stats_sub_rate", report.ListStats.SubRate, tags, now) - acc.Add("list_stats_unsub_rate", report.ListStats.UnsubRate, tags, now) - acc.Add("list_stats_open_rate", report.ListStats.OpenRate, tags, now) - acc.Add("list_stats_click_rate", report.ListStats.ClickRate, tags, now) + fields := map[string]interface{}{ + "emails_sent": report.EmailsSent, + "abuse_reports": report.AbuseReports, + "unsubscribed": report.Unsubscribed, + "hard_bounces": report.Bounces.HardBounces, + "soft_bounces": report.Bounces.SoftBounces, + "syntax_errors": report.Bounces.SyntaxErrors, + "forwards_count": report.Forwards.ForwardsCount, + "forwards_opens": report.Forwards.ForwardsOpens, + "opens_total": report.Opens.OpensTotal, + "unique_opens": report.Opens.UniqueOpens, + "open_rate": report.Opens.OpenRate, + "clicks_total": report.Clicks.ClicksTotal, + "unique_clicks": report.Clicks.UniqueClicks, + "unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks, + "click_rate": report.Clicks.ClickRate, + "facebook_recipient_likes": report.FacebookLikes.RecipientLikes, + "facebook_unique_likes": report.FacebookLikes.UniqueLikes, + "facebook_likes": report.FacebookLikes.FacebookLikes, + "industry_type": report.IndustryStats.Type, + "industry_open_rate": report.IndustryStats.OpenRate, + "industry_click_rate": report.IndustryStats.ClickRate, + "industry_bounce_rate": report.IndustryStats.BounceRate, + "industry_unopen_rate": report.IndustryStats.UnopenRate, + "industry_unsub_rate": report.IndustryStats.UnsubRate, + "industry_abuse_rate": report.IndustryStats.AbuseRate, + "list_stats_sub_rate": report.ListStats.SubRate, + 
"list_stats_unsub_rate": report.ListStats.UnsubRate, + "list_stats_open_rate": report.ListStats.OpenRate, + "list_stats_click_rate": report.ListStats.ClickRate, + } + acc.AddFields("mailchimp", fields, tags, now) } func init() { diff --git a/plugins/memcached/memcached.go b/plugins/memcached/memcached.go index 9919b0c24..5aba8c9fb 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/memcached/memcached.go @@ -137,16 +137,18 @@ func (m *Memcached) gatherServer( tags := map[string]string{"server": address} // Process values + fields := make(map[string]interface{}) for _, key := range sendMetrics { if value, ok := values[key]; ok { // Mostly it is the number if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse != nil { - acc.Add(key, value, tags) + fields[key] = iValue } else { - acc.Add(key, iValue, tags) + fields[key] = value } } } + acc.AddFields("memcached", fields, tags) return nil } From 92343d91d620f801af2bc5aad13aa649680ade0e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 18:03:33 -0600 Subject: [PATCH 008/103] 0.3.0: ping, mysql, nginx --- plugins/mysql/mysql.go | 15 +++++++++------ plugins/nginx/nginx.go | 18 ++++++++++-------- plugins/ping/ping.go | 11 +++++++---- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index 5193f078f..db99123ff 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -138,6 +138,8 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { if err != nil { servtag = "localhost" } + tags := map[string]string{"server": servtag} + fields := make(map[string]interface{}) for rows.Next() { var name string var val interface{} @@ -149,12 +151,10 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { var found bool - tags := map[string]string{"server": servtag} - for _, mapped := range mappings { if strings.HasPrefix(name, mapped.onServer) { i, _ := strconv.Atoi(string(val.([]byte))) - 
acc.Add(mapped.inExport+name[len(mapped.onServer):], i, tags) + fields[mapped.inExport+name[len(mapped.onServer):]] = i found = true } } @@ -170,16 +170,17 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { return err } - acc.Add("queries", i, tags) + fields["queries"] = i case "Slow_queries": i, err := strconv.ParseInt(string(val.([]byte)), 10, 64) if err != nil { return err } - acc.Add("slow_queries", i, tags) + fields["slow_queries"] = i } } + acc.AddFields("mysql", fields, tags) conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") @@ -193,11 +194,13 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { } tags := map[string]string{"server": servtag, "user": user} + fields := make(map[string]interface{}) if err != nil { return err } - acc.Add("connections", connections, tags) + fields["connections"] = connections + acc.AddFields("mysql_users", fields, tags) } return nil diff --git a/plugins/nginx/nginx.go b/plugins/nginx/nginx.go index 2c30ea671..5d7aace2a 100644 --- a/plugins/nginx/nginx.go +++ b/plugins/nginx/nginx.go @@ -127,14 +127,16 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { } tags := getTags(addr) - - acc.Add("active", active, tags) - acc.Add("accepts", accepts, tags) - acc.Add("handled", handled, tags) - acc.Add("requests", requests, tags) - acc.Add("reading", reading, tags) - acc.Add("writing", writing, tags) - acc.Add("waiting", waiting, tags) + fields := map[string]interface{}{ + "active": active, + "accepts": accepts, + "handled": handled, + "requests": requests, + "reading": reading, + "writing": writing, + "waiting": waiting, + } + acc.AddFields("nginx", fields, tags) return nil } diff --git a/plugins/ping/ping.go b/plugins/ping/ping.go index e894fe297..72eee91ad 100644 --- a/plugins/ping/ping.go +++ b/plugins/ping/ping.go @@ -82,10 +82,13 @@ func (p *Ping) Gather(acc plugins.Accumulator) error { } // Calculate packet 
loss percentage loss := float64(trans-rec) / float64(trans) * 100.0 - acc.Add("packets_transmitted", trans, tags) - acc.Add("packets_received", rec, tags) - acc.Add("percent_packet_loss", loss, tags) - acc.Add("average_response_ms", avg, tags) + fields := map[string]interface{}{ + "packets_transmitted": trans, + "packets_received": rec, + "percent_packet_loss": loss, + "average_response_ms": avg, + } + acc.AddFields("ping", fields, tags) }(url, acc) } From 2749dcd12888ea77ac9ea9f32ca5b7ac530545b0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 14 Dec 2015 23:37:25 -0600 Subject: [PATCH 009/103] 0.3.0: procstat --- CHANGELOG.md | 2 ++ plugins/procstat/spec_processor.go | 35 +++++++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3ba9574c..b446f170a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ ### Release Notes - **breaking change** the `io` plugin has been renamed `diskio` - **breaking change** Plugin measurements aggregated into a single measurement. +- `procstat` cpu measurements are now prepended with `cpu_time_` instead of +only `cpu_` ### Features - Plugin measurements aggregated into a single measurement. 
diff --git a/plugins/procstat/spec_processor.go b/plugins/procstat/spec_processor.go index ede14549a..a61152c9b 100644 --- a/plugins/procstat/spec_processor.go +++ b/plugins/procstat/spec_processor.go @@ -12,6 +12,7 @@ import ( type SpecProcessor struct { Prefix string tags map[string]string + fields map[string]interface{} acc plugins.Accumulator proc *process.Process } @@ -23,7 +24,12 @@ func (p *SpecProcessor) add(metric string, value interface{}) { } else { mname = p.Prefix + "_" + metric } - p.acc.Add(mname, value, p.tags) + p.fields[mname] = value +} + +func (p *SpecProcessor) flush() { + p.acc.AddFields("procstat", p.fields, p.tags) + p.fields = make(map[string]interface{}) } func NewSpecProcessor( @@ -39,6 +45,7 @@ func NewSpecProcessor( return &SpecProcessor{ Prefix: prefix, tags: tags, + fields: make(map[string]interface{}), acc: acc, proc: p, } @@ -60,6 +67,7 @@ func (p *SpecProcessor) pushMetrics() { if err := p.pushMemoryStats(); err != nil { log.Printf("procstat, mem stats not available: %s", err.Error()) } + p.flush() } func (p *SpecProcessor) pushFDStats() error { @@ -94,21 +102,22 @@ func (p *SpecProcessor) pushIOStats() error { } func (p *SpecProcessor) pushCPUStats() error { - cpu, err := p.proc.CPUTimes() + cpu_time, err := p.proc.CPUTimes() if err != nil { return err } - p.add("cpu_user", cpu.User) - p.add("cpu_system", cpu.System) - p.add("cpu_idle", cpu.Idle) - p.add("cpu_nice", cpu.Nice) - p.add("cpu_iowait", cpu.Iowait) - p.add("cpu_irq", cpu.Irq) - p.add("cpu_soft_irq", cpu.Softirq) - p.add("cpu_soft_steal", cpu.Steal) - p.add("cpu_soft_stolen", cpu.Stolen) - p.add("cpu_soft_guest", cpu.Guest) - p.add("cpu_soft_guest_nice", cpu.GuestNice) + p.add("cpu_time_user", cpu_time.User) + p.add("cpu_time_system", cpu_time.System) + p.add("cpu_time_idle", cpu_time.Idle) + p.add("cpu_time_nice", cpu_time.Nice) + p.add("cpu_time_iowait", cpu_time.Iowait) + p.add("cpu_time_irq", cpu_time.Irq) + p.add("cpu_time_soft_irq", cpu_time.Softirq) + 
p.add("cpu_time_soft_steal", cpu_time.Steal) + p.add("cpu_time_soft_stolen", cpu_time.Stolen) + p.add("cpu_time_soft_guest", cpu_time.Guest) + p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice) + return nil } From 7746a2b3cd77937a1dee38f63bc9fa8d0cefde5c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 2015 10:03:22 -0600 Subject: [PATCH 010/103] 0.3.0: prometheus & puppetagent --- CHANGELOG.md | 2 ++ plugins/prometheus/prometheus.go | 4 ++-- plugins/puppetagent/puppetagent.go | 11 ++++++----- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b446f170a..e291a58f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ - **breaking change** Plugin measurements aggregated into a single measurement. - `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_` +- The prometheus plugin schema has not been changed (measurements have not been +aggregated). ### Features - Plugin measurements aggregated into a single measurement. 
diff --git a/plugins/prometheus/prometheus.go b/plugins/prometheus/prometheus.go index cb824e3f2..7775b97b6 100644 --- a/plugins/prometheus/prometheus.go +++ b/plugins/prometheus/prometheus.go @@ -80,14 +80,14 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { return fmt.Errorf("error getting processing samples for %s: %s", url, err) } for _, sample := range samples { - tags := map[string]string{} + tags := make(map[string]string) for key, value := range sample.Metric { if key == model.MetricNameLabel { continue } tags[string(key)] = string(value) } - acc.Add(string(sample.Metric[model.MetricNameLabel]), + acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]), float64(sample.Value), tags) } } diff --git a/plugins/puppetagent/puppetagent.go b/plugins/puppetagent/puppetagent.go index 67b01dce3..8939e5963 100644 --- a/plugins/puppetagent/puppetagent.go +++ b/plugins/puppetagent/puppetagent.go @@ -104,15 +104,16 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error { return fmt.Errorf("%s", err) } - structPrinter(&puppetState, acc) + tags := map[string]string{"location": pa.Location} + structPrinter(&puppetState, acc, tags) return nil } -func structPrinter(s *State, acc plugins.Accumulator) { - +func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) { e := reflect.ValueOf(s).Elem() + fields := make(map[string]interface{}) for tLevelFNum := 0; tLevelFNum < e.NumField(); tLevelFNum++ { name := e.Type().Field(tLevelFNum).Name nameNumField := e.FieldByName(name).NumField() @@ -123,10 +124,10 @@ func structPrinter(s *State, acc plugins.Accumulator) { lname := strings.ToLower(name) lsName := strings.ToLower(sName) - acc.Add(fmt.Sprintf("%s_%s", lname, lsName), sValue, nil) + fields[fmt.Sprintf("%s_%s", lname, lsName)] = sValue } } - + acc.AddFields("puppetagent", fields, tags) } func init() { From 6fcd05b855490908061eb4ad535d6b5ca84f44b2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 
2015 11:08:13 -0600 Subject: [PATCH 011/103] 0.3.0 redis & rabbitmq --- outputs/influxdb/influxdb.go | 2 ++ plugins/rabbitmq/rabbitmq.go | 57 ++++++++++++++++++++---------------- plugins/redis/redis.go | 12 +++++--- 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/outputs/influxdb/influxdb.go b/outputs/influxdb/influxdb.go index a9fa2edc3..14391884d 100644 --- a/outputs/influxdb/influxdb.go +++ b/outputs/influxdb/influxdb.go @@ -7,6 +7,7 @@ import ( "math/rand" "net/url" "strings" + "time" "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" @@ -110,6 +111,7 @@ func (i *InfluxDB) Connect() error { } i.conns = conns + rand.Seed(time.Now().UnixNano()) return nil } diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/rabbitmq/rabbitmq.go index 27580a13a..7101ab431 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ b/plugins/rabbitmq/rabbitmq.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "strconv" + "time" "github.com/influxdb/telegraf/plugins" ) @@ -199,20 +200,20 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan if serv.Name != "" { tags["name"] = serv.Name } - - acc.Add("messages", overview.QueueTotals.Messages, tags) - acc.Add("messages_ready", overview.QueueTotals.MessagesReady, tags) - acc.Add("messages_unacked", overview.QueueTotals.MessagesUnacknowledged, tags) - - acc.Add("channels", overview.ObjectTotals.Channels, tags) - acc.Add("connections", overview.ObjectTotals.Connections, tags) - acc.Add("consumers", overview.ObjectTotals.Consumers, tags) - acc.Add("exchanges", overview.ObjectTotals.Exchanges, tags) - acc.Add("queues", overview.ObjectTotals.Queues, tags) - - acc.Add("messages_acked", overview.MessageStats.Ack, tags) - acc.Add("messages_delivered", overview.MessageStats.Deliver, tags) - acc.Add("messages_published", overview.MessageStats.Publish, tags) + fields := map[string]interface{}{ + "messages": overview.QueueTotals.Messages, + "messages_ready": 
overview.QueueTotals.MessagesReady, + "messages_unacked": overview.QueueTotals.MessagesUnacknowledged, + "channels": overview.ObjectTotals.Channels, + "connections": overview.ObjectTotals.Connections, + "consumers": overview.ObjectTotals.Consumers, + "exchanges": overview.ObjectTotals.Exchanges, + "queues": overview.ObjectTotals.Queues, + "messages_acked": overview.MessageStats.Ack, + "messages_delivered": overview.MessageStats.Deliver, + "messages_published": overview.MessageStats.Publish, + } + acc.AddFields("rabbitmq_overview", fields, tags) errChan <- nil } @@ -225,6 +226,7 @@ func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan cha errChan <- err return } + now := time.Now() for _, node := range nodes { if !shouldGatherNode(node, serv) { @@ -234,17 +236,20 @@ func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan cha tags := map[string]string{"url": serv.URL} tags["node"] = node.Name - acc.Add("disk_free", node.DiskFree, tags) - acc.Add("disk_free_limit", node.DiskFreeLimit, tags) - acc.Add("fd_total", node.FdTotal, tags) - acc.Add("fd_used", node.FdUsed, tags) - acc.Add("mem_limit", node.MemLimit, tags) - acc.Add("mem_used", node.MemUsed, tags) - acc.Add("proc_total", node.ProcTotal, tags) - acc.Add("proc_used", node.ProcUsed, tags) - acc.Add("run_queue", node.RunQueue, tags) - acc.Add("sockets_total", node.SocketsTotal, tags) - acc.Add("sockets_used", node.SocketsUsed, tags) + fields := map[string]interface{}{ + "disk_free": node.DiskFree, + "disk_free_limit": node.DiskFreeLimit, + "fd_total": node.FdTotal, + "fd_used": node.FdUsed, + "mem_limit": node.MemLimit, + "mem_used": node.MemUsed, + "proc_total": node.ProcTotal, + "proc_used": node.ProcUsed, + "run_queue": node.RunQueue, + "sockets_total": node.SocketsTotal, + "sockets_used": node.SocketsUsed, + } + acc.AddFields("rabbitmq_node", fields, tags, now) } errChan <- nil @@ -273,7 +278,7 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, 
errChan ch } acc.AddFields( - "queue", + "rabbitmq_queue", map[string]interface{}{ // common information "consumers": queue.Consumers, diff --git a/plugins/redis/redis.go b/plugins/redis/redis.go index 151fb4f46..2e338ff19 100644 --- a/plugins/redis/redis.go +++ b/plugins/redis/redis.go @@ -164,6 +164,7 @@ func gatherInfoOutput( var keyspace_hits, keyspace_misses uint64 = 0, 0 scanner := bufio.NewScanner(rdr) + fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() if strings.Contains(line, "ERR") { @@ -199,7 +200,7 @@ func gatherInfoOutput( } if err == nil { - acc.Add(metric, ival, tags) + fields[metric] = ival continue } @@ -208,13 +209,14 @@ func gatherInfoOutput( return err } - acc.Add(metric, fval, tags) + fields[metric] = fval } var keyspace_hitrate float64 = 0.0 if keyspace_hits != 0 || keyspace_misses != 0 { keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) } - acc.Add("keyspace_hitrate", keyspace_hitrate, tags) + fields["keyspace_hitrate"] = keyspace_hitrate + acc.AddFields("redis", fields, tags) return nil } @@ -229,15 +231,17 @@ func gatherKeyspaceLine( tags map[string]string, ) { if strings.Contains(line, "keys=") { + fields := make(map[string]interface{}) tags["database"] = name dbparts := strings.Split(line, ",") for _, dbp := range dbparts { kv := strings.Split(dbp, "=") ival, err := strconv.ParseUint(kv[1], 10, 64) if err == nil { - acc.Add(kv[0], ival, tags) + fields[kv[0]] = ival } } + acc.AddFields("redis_keyspace", fields, tags) } } From 5f4262921ae18e1881459938b9f290f713b731b5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 2015 12:36:45 -0600 Subject: [PATCH 012/103] 0.3.0: trig and twemproxy --- plugins/trig/trig.go | 1 - plugins/twemproxy/twemproxy.go | 18 +++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/plugins/trig/trig.go b/plugins/trig/trig.go index e966cbd46..7ed2bf3d9 100644 --- a/plugins/trig/trig.go +++ b/plugins/trig/trig.go 
@@ -41,6 +41,5 @@ func (s *Trig) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("Trig", func() plugins.Plugin { return &Trig{x: 0.0} }) } diff --git a/plugins/twemproxy/twemproxy.go b/plugins/twemproxy/twemproxy.go index 0b1f6139e..1933e8d0d 100644 --- a/plugins/twemproxy/twemproxy.go +++ b/plugins/twemproxy/twemproxy.go @@ -100,21 +100,23 @@ func (ti *TwemproxyInstance) processStat( } } + fields := make(map[string]interface{}) metrics := []string{"total_connections", "curr_connections", "timestamp"} for _, m := range metrics { if value, ok := data[m]; ok { if val, ok := value.(float64); ok { - acc.Add(m, val, tags) + fields[m] = val } } } + acc.AddFields("twemproxy", fields, tags) for _, pool := range ti.Pools { if poolStat, ok := data[pool]; ok { if data, ok := poolStat.(map[string]interface{}); ok { poolTags := copyTags(tags) poolTags["pool"] = pool - ti.processPool(acc, poolTags, pool+"_", data) + ti.processPool(acc, poolTags, data) } } } @@ -124,16 +126,16 @@ func (ti *TwemproxyInstance) processStat( func (ti *TwemproxyInstance) processPool( acc plugins.Accumulator, tags map[string]string, - prefix string, data map[string]interface{}, ) { serverTags := make(map[string]map[string]string) + fields := make(map[string]interface{}) for key, value := range data { switch key { case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof": if val, ok := value.(float64); ok { - acc.Add(prefix+key, val, tags) + fields[key] = val } default: if data, ok := value.(map[string]interface{}); ok { @@ -141,27 +143,29 @@ func (ti *TwemproxyInstance) processPool( serverTags[key] = copyTags(tags) serverTags[key]["server"] = key } - ti.processServer(acc, serverTags[key], prefix, data) + ti.processServer(acc, serverTags[key], data) } } } + acc.AddFields("twemproxy_pool", fields, tags) } // Process backend server(redis/memcached) stats func (ti *TwemproxyInstance) processServer( acc plugins.Accumulator, tags 
map[string]string, - prefix string, data map[string]interface{}, ) { + fields := make(map[string]interface{}) for key, value := range data { switch key { default: if val, ok := value.(float64); ok { - acc.Add(prefix+key, val, tags) + fields[key] = val } } } + acc.AddFields("twemproxy_pool", fields, tags) } // Tags is not expected to be mutated after passing to Add. From a34418d724f5e4b7ccd2d3d9a51f5261cff59436 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 2015 14:21:02 -0600 Subject: [PATCH 013/103] backwards compatability for io->diskio change --- internal/config/config.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/config/config.go b/internal/config/config.go index 0270a3913..9275fa177 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -531,6 +531,11 @@ func (c *Config) addPlugin(name string, table *ast.Table) error { if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) { return nil } + // Legacy support renaming io plugin to diskio + if name == "io" { + name = "diskio" + } + creator, ok := plugins.Plugins[name] if !ok { return fmt.Errorf("Undefined but requested plugin: %s", name) From 5aca58ad2aef3f53088568fcf54b5515e244b4fe Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 2015 15:11:23 -0600 Subject: [PATCH 014/103] 0.3.0: zookeeper and zfs --- plugins/zfs/zfs.go | 10 ++++++---- plugins/zookeeper/zookeeper.go | 20 +++++++++++--------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/plugins/zfs/zfs.go b/plugins/zfs/zfs.go index f655c4abf..3594f670b 100644 --- a/plugins/zfs/zfs.go +++ b/plugins/zfs/zfs.go @@ -88,15 +88,15 @@ func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error { } tag := map[string]string{"pool": pool.name} - + fields := make(map[string]interface{}) for i := 0; i < keyCount; i++ { value, err := strconv.ParseInt(values[i], 10, 64) if err != nil { return err } - - acc.Add(keys[i], value, tag) + fields[keys[i]] = value } + 
acc.AddFields("zfs_pool", fields, tag) return nil } @@ -124,6 +124,7 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error { } } + fields := make(map[string]interface{}) for _, metric := range kstatMetrics { lines, err := internal.ReadLines(kstatPath + "/" + metric) if err != nil { @@ -140,9 +141,10 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error { key := metric + "_" + rawData[0] rawValue := rawData[len(rawData)-1] value, _ := strconv.ParseInt(rawValue, 10, 64) - acc.Add(key, value, tags) + fields[key] = value } } + acc.AddFields("zfs", fields, tags) return nil } diff --git a/plugins/zookeeper/zookeeper.go b/plugins/zookeeper/zookeeper.go index 395bd3fdd..342bace2c 100644 --- a/plugins/zookeeper/zookeeper.go +++ b/plugins/zookeeper/zookeeper.go @@ -67,35 +67,37 @@ func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error defer c.Close() fmt.Fprintf(c, "%s\n", "mntr") - rdr := bufio.NewReader(c) - scanner := bufio.NewScanner(rdr) + service := strings.Split(address, ":") + if len(service) != 2 { + return fmt.Errorf("Invalid service address: %s", address) + } + tags := map[string]string{"server": service[0], "port": service[1]} + + fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) parts := re.FindStringSubmatch(string(line)) - service := strings.Split(address, ":") - - if len(parts) != 3 || len(service) != 2 { + if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) } - tags := map[string]string{"server": service[0], "port": service[1]} - measurement := strings.TrimPrefix(parts[1], "zk_") sValue := string(parts[2]) iVal, err := strconv.ParseInt(sValue, 10, 64) if err == nil { - acc.Add(measurement, iVal, tags) + fields[measurement] = iVal } else { - acc.Add(measurement, sValue, tags) + fields[measurement] = sValue } } + acc.AddFields("zookeeper", fields, tags) return nil } From aba123dae017b5428a92801d87b06de308c21748 Mon 
Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 15 Dec 2015 15:41:01 -0600 Subject: [PATCH 015/103] 0.3.0: rethinkdb --- plugins/rethinkdb/rethinkdb_data.go | 35 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/plugins/rethinkdb/rethinkdb_data.go b/plugins/rethinkdb/rethinkdb_data.go index 5fae28931..59abd83ba 100644 --- a/plugins/rethinkdb/rethinkdb_data.go +++ b/plugins/rethinkdb/rethinkdb_data.go @@ -86,25 +86,30 @@ var engineStats = map[string]string{ "total_writes": "TotalWrites", } -func (e *Engine) AddEngineStats(keys []string, acc plugins.Accumulator, tags map[string]string) { +func (e *Engine) AddEngineStats( + keys []string, + acc plugins.Accumulator, + tags map[string]string, +) { engine := reflect.ValueOf(e).Elem() + fields := make(map[string]interface{}) for _, key := range keys { - acc.Add( - key, - engine.FieldByName(engineStats[key]).Interface(), - tags, - ) + fields[key] = engine.FieldByName(engineStats[key]).Interface() } + acc.AddFields("rethinkdb_engine", fields, tags) } func (s *Storage) AddStats(acc plugins.Accumulator, tags map[string]string) { - acc.Add("cache_bytes_in_use", s.Cache.BytesInUse, tags) - acc.Add("disk_read_bytes_per_sec", s.Disk.ReadBytesPerSec, tags) - acc.Add("disk_read_bytes_total", s.Disk.ReadBytesTotal, tags) - acc.Add("disk_written_bytes_per_sec", s.Disk.WriteBytesPerSec, tags) - acc.Add("disk_written_bytes_total", s.Disk.WriteBytesTotal, tags) - acc.Add("disk_usage_data_bytes", s.Disk.SpaceUsage.Data, tags) - acc.Add("disk_usage_garbage_bytes", s.Disk.SpaceUsage.Garbage, tags) - acc.Add("disk_usage_metadata_bytes", s.Disk.SpaceUsage.Metadata, tags) - acc.Add("disk_usage_preallocated_bytes", s.Disk.SpaceUsage.Prealloc, tags) + fields := map[string]interface{}{ + "cache_bytes_in_use": s.Cache.BytesInUse, + "disk_read_bytes_per_sec": s.Disk.ReadBytesPerSec, + "disk_read_bytes_total": s.Disk.ReadBytesTotal, + "disk_written_bytes_per_sec": s.Disk.WriteBytesPerSec, + 
"disk_written_bytes_total": s.Disk.WriteBytesTotal, + "disk_usage_data_bytes": s.Disk.SpaceUsage.Data, + "disk_usage_garbage_bytes": s.Disk.SpaceUsage.Garbage, + "disk_usage_metadata_bytes": s.Disk.SpaceUsage.Metadata, + "disk_usage_preallocated_bytes": s.Disk.SpaceUsage.Prealloc, + } + acc.AddFields("rethinkdb", fields, tags) } From 9c5321c53853e3629319f6fecc1e7052a0d65577 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 18 Dec 2015 16:32:45 -0700 Subject: [PATCH 016/103] 0.3.0 HAProxy rebase --- plugins/haproxy/haproxy.go | 93 ++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 49 deletions(-) diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index 88a74a9ad..93d3f6c6c 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -138,8 +138,7 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { return fmt.Errorf("Unable parse server address '%s': %s", addr, err) } - req, err := http.NewRequest("GET", - fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s/;csv", u.Scheme, u.Host, u.Path), nil) if u.User != nil { p, _ := u.User.Password() req.SetBasicAuth(u.User.Username(), p) @@ -163,13 +162,14 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { now := time.Now() for _, row := range result { - fields := make(map[string]interface{}) - tags := map[string]string{ - "server": host, - "proxy": row[HF_PXNAME], - "sv": row[HF_SVNAME], - } + for field, v := range row { + fields := make(map[string]interface{}) + tags := map[string]string{ + "server": host, + "proxy": row[HF_PXNAME], + "sv": row[HF_SVNAME], + } switch field { case HF_QCUR: ival, err := strconv.ParseUint(v, 10, 64) @@ -194,7 +194,7 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { case HF_STOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("stot", ival, tags) + 
fields["stot"] = ival } case HF_BIN: ival, err := strconv.ParseUint(v, 10, 64) @@ -219,52 +219,52 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { case HF_EREQ: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("ereq", ival, tags) + fields["ereq"] = ival } case HF_ECON: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("econ", ival, tags) + fields["econ"] = ival } case HF_ERESP: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("eresp", ival, tags) + fields["eresp"] = ival } case HF_WRETR: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("wretr", ival, tags) + fields["wretr"] = ival } case HF_WREDIS: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("wredis", ival, tags) + fields["wredis"] = ival } case HF_ACT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("active_servers", ival, tags) + fields["active_servers"] = ival } case HF_BCK: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("backup_servers", ival, tags) + fields["backup_servers"] = ival } case HF_DOWNTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("downtime", ival, tags) + fields["downtime"] = ival } case HF_THROTTLE: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("throttle", ival, tags) + fields["throttle"] = ival } case HF_LBTOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - acc.Add("lbtot", ival, tags) + fields["lbtot"] = ival } case HF_RATE: ival, err := strconv.ParseUint(v, 10, 64) @@ -279,7 +279,7 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { case HF_CHECK_DURATION: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - fields["stot"] = ival + fields["check_duration"] = ival } case HF_HRSP_1xx: ival, err := strconv.ParseUint(v, 10, 64) @@ -306,55 +306,50 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { 
if err == nil { fields["http_response.5xx"] = ival } - case HF_EREQ: - ival, err := strconv.ParseUint(v, 10, 64) - if err == nil { - fields["ereq"] = ival - } case HF_REQ_RATE: - ival, err := strconv.ParseUint(v, 10, 64) - if err == nil { - fields["eresp"] = ival - } - case HF_ECON: - ival, err := strconv.ParseUint(v, 10, 64) - if err == nil { - fields["econ"] = ival - } - case HF_WRETR: - ival, err := strconv.ParseUint(v, 10, 64) - if err == nil { - fields["wretr"] = ival - } - case HF_CLI_ABRT: - ival, err := strconv.ParseUint(v, 10, 64) - if err == nil { - fields["wredis"] = ival - } - case HF_SRV_ABRT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { fields["req_rate"] = ival } - case HF_QTIME: + case HF_REQ_RATE_MAX: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { fields["req_rate_max"] = ival } - case HF_CTIME: + case HF_REQ_TOT: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { fields["req_tot"] = ival } + case HF_CLI_ABRT: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + fields["cli_abort"] = ival + } + case HF_SRV_ABRT: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + fields["srv_abort"] = ival + } + case HF_QTIME: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + fields["qtime"] = ival + } + case HF_CTIME: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + fields["ctime"] = ival + } case HF_RTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - fields["throttle"] = ival + fields["rtime"] = ival } case HF_TTIME: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { - fields["lbtot"] = ival + fields["ttime"] = ival } } } From 64a832467e7da590e53026fc22c888534babc526 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 18 Dec 2015 17:09:01 -0700 Subject: [PATCH 017/103] 0.3.0: postgresql and phpfpm --- plugins/haproxy/haproxy.go | 13 ++++++------- plugins/phpfpm/phpfpm.go | 4 +++- plugins/postgresql/postgresql.go | 6 ++++-- 3 files changed, 13 
insertions(+), 10 deletions(-) diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index 93d3f6c6c..2069af249 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -162,14 +162,13 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { now := time.Now() for _, row := range result { - + fields := make(map[string]interface{}) + tags := map[string]string{ + "server": host, + "proxy": row[HF_PXNAME], + "sv": row[HF_SVNAME], + } for field, v := range row { - fields := make(map[string]interface{}) - tags := map[string]string{ - "server": host, - "proxy": row[HF_PXNAME], - "sv": row[HF_SVNAME], - } switch field { case HF_QCUR: ival, err := strconv.ParseUint(v, 10, 64) diff --git a/plugins/phpfpm/phpfpm.go b/plugins/phpfpm/phpfpm.go index 8408c86f7..2f2164913 100644 --- a/plugins/phpfpm/phpfpm.go +++ b/plugins/phpfpm/phpfpm.go @@ -198,9 +198,11 @@ func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat, "url": host, "pool": pool, } + fields := make(map[string]interface{}) for k, v := range stats[pool] { - acc.Add(strings.Replace(k, " ", "_", -1), v, tags) + fields[strings.Replace(k, " ", "_", -1)] = v } + acc.AddFields("phpfpm", fields, tags) } return stats, nil diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index a31a9b4d2..5da776cbd 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -42,7 +42,7 @@ var sampleConfig = ` # to grab metrics for. # - address = "sslmode=disable" + address = "host=localhost user=postgres sslmode=disable" # A list of databases to pull metrics about. If not specified, metrics for all # databases are gathered. 
@@ -161,12 +161,14 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) tags := map[string]string{"server": serv.Address, "db": dbname.String()} + fields := make(map[string]interface{}) for col, val := range columnMap { _, ignore := ignoredColumns[col] if !ignore { - acc.Add(col, *val, tags) + fields[col] = *val } } + acc.AddFields("postgresql", fields, tags) return nil } From 30d8ed411a024100777a2549d830dfe69664495b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 13:31:22 -0700 Subject: [PATCH 018/103] 0.3.0: mongodb and jolokia --- plugins/jolokia/jolokia.go | 84 ++++++------------------------- plugins/mongodb/mongodb.go | 3 +- plugins/mongodb/mongodb_data.go | 32 +++++++----- plugins/mongodb/mongodb_server.go | 3 +- 4 files changed, 38 insertions(+), 84 deletions(-) diff --git a/plugins/jolokia/jolokia.go b/plugins/jolokia/jolokia.go index ee579433a..c3241a892 100644 --- a/plugins/jolokia/jolokia.go +++ b/plugins/jolokia/jolokia.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "net/http" "net/url" - "strings" "github.com/influxdb/telegraf/plugins" ) @@ -23,8 +22,6 @@ type Server struct { type Metric struct { Name string Jmx string - Pass []string - Drop []string } type JolokiaClient interface { @@ -44,7 +41,6 @@ type Jolokia struct { Context string Servers []Server Metrics []Metric - Tags map[string]string } func (j *Jolokia) SampleConfig() string { @@ -52,10 +48,6 @@ func (j *Jolokia) SampleConfig() string { # This is the context root used to compose the jolokia url context = "/jolokia/read" - # Tags added to each measurements - [jolokia.tags] - group = "as" - # List of servers exposing jolokia read service [[plugins.jolokia.servers]] name = "stable" @@ -76,17 +68,11 @@ func (j *Jolokia) SampleConfig() string { [[plugins.jolokia.metrics]] name = "memory_eden" jmx = "/java.lang:type=MemoryPool,name=PS Eden Space/Usage" - drop = [ "committed" ] - # This passes only DaemonThreadCount and ThreadCount 
[[plugins.jolokia.metrics]] name = "heap_threads" jmx = "/java.lang:type=Threading" - pass = [ - "DaemonThreadCount", - "ThreadCount" - ] ` } @@ -100,12 +86,9 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { if err != nil { return nil, err } + defer req.Body.Close() resp, err := j.jClient.MakeRequest(req) - if err != nil { - return nil, err - } - if err != nil { return nil, err } @@ -137,65 +120,22 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { return jsonOut, nil } -func (m *Metric) shouldPass(field string) bool { - - if m.Pass != nil { - - for _, pass := range m.Pass { - if strings.HasPrefix(field, pass) { - return true - } - } - - return false - } - - if m.Drop != nil { - - for _, drop := range m.Drop { - if strings.HasPrefix(field, drop) { - return false - } - } - - return true - } - - return true -} - -func (m *Metric) filterFields(fields map[string]interface{}) map[string]interface{} { - - for field, _ := range fields { - if !m.shouldPass(field) { - delete(fields, field) - } - } - - return fields -} - func (j *Jolokia) Gather(acc plugins.Accumulator) error { - context := j.Context //"/jolokia/read" servers := j.Servers metrics := j.Metrics - tags := j.Tags - - if tags == nil { - tags = map[string]string{} - } + tags := make(map[string]string) for _, server := range servers { + tags["server"] = server.Name + tags["port"] = server.Port + tags["host"] = server.Host + fields := make(map[string]interface{}) for _, metric := range metrics { measurement := metric.Name jmxPath := metric.Jmx - tags["server"] = server.Name - tags["port"] = server.Port - tags["host"] = server.Host - // Prepare URL requestUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context + jmxPath) @@ -209,16 +149,20 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error { out, _ := j.getAttr(requestUrl) if values, ok := out["value"]; ok { - switch values.(type) { + switch t := values.(type) { case 
map[string]interface{}: - acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags) + for k, v := range t { + fields[measurement+"_"+k] = v + } case interface{}: - acc.Add(measurement, values.(interface{}), tags) + fields[measurement] = t } } else { - fmt.Printf("Missing key 'value' in '%s' output response\n", requestUrl.String()) + fmt.Printf("Missing key 'value' in '%s' output response\n", + requestUrl.String()) } } + acc.AddFields("jolokia", fields, tags) } return nil diff --git a/plugins/mongodb/mongodb.go b/plugins/mongodb/mongodb.go index 87882a341..40c77931a 100644 --- a/plugins/mongodb/mongodb.go +++ b/plugins/mongodb/mongodb.go @@ -98,7 +98,8 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { - return fmt.Errorf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) + return fmt.Errorf("Unable to parse URL (%s), %s\n", + dialAddrs[0], err.Error()) } dialInfo.Direct = true dialInfo.Timeout = time.Duration(10) * time.Second diff --git a/plugins/mongodb/mongodb_data.go b/plugins/mongodb/mongodb_data.go index fda1843bb..1ebb76ced 100644 --- a/plugins/mongodb/mongodb_data.go +++ b/plugins/mongodb/mongodb_data.go @@ -10,6 +10,7 @@ import ( type MongodbData struct { StatLine *StatLine + Fields map[string]interface{} Tags map[string]string } @@ -20,6 +21,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { return &MongodbData{ StatLine: statLine, Tags: tags, + Fields: make(map[string]interface{}), } } @@ -63,38 +65,44 @@ var WiredTigerStats = map[string]string{ "percent_cache_used": "CacheUsedPercent", } -func (d *MongodbData) AddDefaultStats(acc plugins.Accumulator) { +func (d *MongodbData) AddDefaultStats() { statLine := reflect.ValueOf(d.StatLine).Elem() - d.addStat(acc, statLine, DefaultStats) + d.addStat(statLine, DefaultStats) if d.StatLine.NodeType != "" { - d.addStat(acc, statLine, DefaultReplStats) 
+ d.addStat(statLine, DefaultReplStats) } if d.StatLine.StorageEngine == "mmapv1" { - d.addStat(acc, statLine, MmapStats) + d.addStat(statLine, MmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { for key, value := range WiredTigerStats { val := statLine.FieldByName(value).Interface() percentVal := fmt.Sprintf("%.1f", val.(float64)*100) floatVal, _ := strconv.ParseFloat(percentVal, 64) - d.add(acc, key, floatVal) + d.add(key, floatVal) } } } -func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, stats map[string]string) { +func (d *MongodbData) addStat( + statLine reflect.Value, + stats map[string]string, +) { for key, value := range stats { val := statLine.FieldByName(value).Interface() - d.add(acc, key, val) + d.add(key, val) } } -func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) { +func (d *MongodbData) add(key string, val interface{}) { + d.Fields[key] = val +} + +func (d *MongodbData) flush(acc plugins.Accumulator) { acc.AddFields( - key, - map[string]interface{}{ - "value": val, - }, + "mongodb", + d.Fields, d.Tags, d.StatLine.Time, ) + d.Fields = make(map[string]interface{}) } diff --git a/plugins/mongodb/mongodb_server.go b/plugins/mongodb/mongodb_server.go index d9b0edaad..134be5bae 100644 --- a/plugins/mongodb/mongodb_server.go +++ b/plugins/mongodb/mongodb_server.go @@ -44,7 +44,8 @@ func (s *Server) gatherData(acc plugins.Accumulator) error { NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds), s.getDefaultTags(), ) - data.AddDefaultStats(acc) + data.AddDefaultStats() + data.flush(acc) } return nil } From ec39d106951a7af09591b4a9f8abaa599e2666d5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 14:08:31 -0700 Subject: [PATCH 019/103] 0.3.0 output: datadog and amon --- outputs/amon/amon.go | 35 ++++++++++++++++++++++------------- outputs/datadog/datadog.go | 37 ++++++++++++++++++++++--------------- 2 files changed, 44 insertions(+), 28 deletions(-) 
diff --git a/outputs/amon/amon.go b/outputs/amon/amon.go index 2ab068b75..231010baf 100644 --- a/outputs/amon/amon.go +++ b/outputs/amon/amon.go @@ -60,18 +60,23 @@ func (a *Amon) Write(points []*client.Point) error { ts := TimeSeries{} var tempSeries = make([]*Metric, len(points)) var acceptablePoints = 0 + for _, pt := range points { - metric := &Metric{ - Metric: strings.Replace(pt.Name(), "_", ".", -1), - } - if p, err := buildPoint(pt); err == nil { - metric.Points[0] = p - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + mname := strings.Replace(pt.Name(), "_", ".", -1) + if amonPts, err := buildPoints(pt); err == nil { + for fieldName, amonPt := range amonPts { + metric := &Metric{ + Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1), + } + metric.Points[0] = amonPt + tempSeries[acceptablePoints] = metric + acceptablePoints += 1 + } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) } } + ts.Series = make([]*Metric, acceptablePoints) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) @@ -110,13 +115,17 @@ func (a *Amon) authenticatedUrl() string { return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) } -func buildPoint(pt *client.Point) (Point, error) { - var p Point - if err := p.setValue(pt.Fields()["value"]); err != nil { - return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) +func buildPoints(pt *client.Point) (map[string]Point, error) { + pts := make(map[string]Point) + for k, v := range pt.Fields() { + var p Point + if err := p.setValue(v); err != nil { + return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) + } + p[0] = float64(pt.Time().Unix()) + pts[k] = p } - p[0] = float64(pt.Time().Unix()) - return p, nil + return pts, nil } func (p *Point) setValue(v interface{}) error { diff --git a/outputs/datadog/datadog.go b/outputs/datadog/datadog.go index f37c81a9c..eef0702a2 100644 --- a/outputs/datadog/datadog.go +++ 
b/outputs/datadog/datadog.go @@ -69,20 +69,23 @@ func (d *Datadog) Write(points []*client.Point) error { ts := TimeSeries{} var tempSeries = make([]*Metric, len(points)) var acceptablePoints = 0 + for _, pt := range points { - metric := &Metric{ - Metric: strings.Replace(pt.Name(), "_", ".", -1), - Tags: buildTags(pt.Tags()), - Host: pt.Tags()["host"], - } - if p, err := buildPoint(pt); err == nil { - metric.Points[0] = p - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + mname := strings.Replace(pt.Name(), "_", ".", -1) + if amonPts, err := buildPoints(pt); err == nil { + for fieldName, amonPt := range amonPts { + metric := &Metric{ + Metric: mname + strings.Replace(fieldName, "_", ".", -1), + } + metric.Points[0] = amonPt + tempSeries[acceptablePoints] = metric + acceptablePoints += 1 + } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) } } + ts.Series = make([]*Metric, acceptablePoints) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) @@ -123,13 +126,17 @@ func (d *Datadog) authenticatedUrl() string { return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode()) } -func buildPoint(pt *client.Point) (Point, error) { - var p Point - if err := p.setValue(pt.Fields()["value"]); err != nil { - return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) +func buildPoints(pt *client.Point) (map[string]Point, error) { + pts := make(map[string]Point) + for k, v := range pt.Fields() { + var p Point + if err := p.setValue(v); err != nil { + return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) + } + p[0] = float64(pt.Time().Unix()) + pts[k] = p } - p[0] = float64(pt.Time().Unix()) - return p, nil + return pts, nil } func buildTags(ptTags map[string]string) []string { From 2611931f8280f023623934031b8a64da93cacb97 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 14:19:43 -0700 Subject: [PATCH 020/103] 0.3.0 output: librato --- outputs/amon/amon.go | 10 ++++---- 
outputs/datadog/datadog.go | 10 ++++---- outputs/librato/librato.go | 50 +++++++++++++++++++++++--------------- 3 files changed, 40 insertions(+), 30 deletions(-) diff --git a/outputs/amon/amon.go b/outputs/amon/amon.go index 231010baf..84914a27c 100644 --- a/outputs/amon/amon.go +++ b/outputs/amon/amon.go @@ -58,8 +58,8 @@ func (a *Amon) Write(points []*client.Point) error { return nil } ts := TimeSeries{} - var tempSeries = make([]*Metric, len(points)) - var acceptablePoints = 0 + tempSeries := []*Metric{} + metricCounter := 0 for _, pt := range points { mname := strings.Replace(pt.Name(), "_", ".", -1) @@ -69,15 +69,15 @@ func (a *Amon) Write(points []*client.Point) error { Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1), } metric.Points[0] = amonPt - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + tempSeries = append(tempSeries, metric) + metricCounter++ } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) } } - ts.Series = make([]*Metric, acceptablePoints) + ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { diff --git a/outputs/datadog/datadog.go b/outputs/datadog/datadog.go index eef0702a2..e654f9780 100644 --- a/outputs/datadog/datadog.go +++ b/outputs/datadog/datadog.go @@ -67,8 +67,8 @@ func (d *Datadog) Write(points []*client.Point) error { return nil } ts := TimeSeries{} - var tempSeries = make([]*Metric, len(points)) - var acceptablePoints = 0 + tempSeries := []*Metric{} + metricCounter := 0 for _, pt := range points { mname := strings.Replace(pt.Name(), "_", ".", -1) @@ -78,15 +78,15 @@ func (d *Datadog) Write(points []*client.Point) error { Metric: mname + strings.Replace(fieldName, "_", ".", -1), } metric.Points[0] = amonPt - tempSeries[acceptablePoints] = metric - acceptablePoints += 1 + tempSeries = append(tempSeries, metric) + metricCounter++ } } else { log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) 
} } - ts.Series = make([]*Metric, acceptablePoints) + ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { diff --git a/outputs/librato/librato.go b/outputs/librato/librato.go index 9f8f6dd0d..a653ce196 100644 --- a/outputs/librato/librato.go +++ b/outputs/librato/librato.go @@ -74,17 +74,21 @@ func (l *Librato) Write(points []*client.Point) error { return nil } metrics := Metrics{} - var tempGauges = make([]*Gauge, len(points)) - var acceptablePoints = 0 + tempGauges := []*Gauge{} + metricCounter := 0 + for _, pt := range points { - if gauge, err := l.buildGauge(pt); err == nil { - tempGauges[acceptablePoints] = gauge - acceptablePoints += 1 + if gauges, err := l.buildGauges(pt); err == nil { + for _, gauge := range gauges { + tempGauges = append(tempGauges, gauge) + metricCounter++ + } } else { log.Printf("unable to build Gauge for %s, skipping\n", pt.Name()) } } - metrics.Gauges = make([]*Gauge, acceptablePoints) + + metrics.Gauges = make([]*Gauge, metricCounter) copy(metrics.Gauges, tempGauges[0:]) metricsBytes, err := json.Marshal(metrics) if err != nil { @@ -118,22 +122,28 @@ func (l *Librato) Description() string { return "Configuration for Librato API to send metrics to." 
} -func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) { - gauge := &Gauge{ - Name: pt.Name(), - MeasureTime: pt.Time().Unix(), - } - if err := gauge.setValue(pt.Fields()["value"]); err != nil { - return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error()) - } - if l.SourceTag != "" { - if source, ok := pt.Tags()[l.SourceTag]; ok { - gauge.Source = source - } else { - return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag) +func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) { + gauges := []*Gauge{} + for fieldName, value := range pt.Fields() { + gauge := &Gauge{ + Name: pt.Name() + "_" + fieldName, + MeasureTime: pt.Time().Unix(), + } + if err := gauge.setValue(value); err != nil { + return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", + err.Error()) + } + if l.SourceTag != "" { + if source, ok := pt.Tags()[l.SourceTag]; ok { + gauge.Source = source + } else { + return gauges, + fmt.Errorf("undeterminable Source type from Field, %s\n", + l.SourceTag) + } } } - return gauge, nil + return gauges, nil } func (g *Gauge) setValue(v interface{}) error { From 40a3feaad02d9a138de61a3e53e491cfcb6c54b8 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 14:30:37 -0700 Subject: [PATCH 021/103] 0.3.0 outputs: opentsdb --- outputs/opentsdb/opentsdb.go | 62 +++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/outputs/opentsdb/opentsdb.go b/outputs/opentsdb/opentsdb.go index 22c8c91fc..0146458dc 100644 --- a/outputs/opentsdb/opentsdb.go +++ b/outputs/opentsdb/opentsdb.go @@ -62,7 +62,8 @@ func (o *OpenTSDB) Write(points []*client.Point) error { if len(points) == 0 { return nil } - var timeNow = time.Now() + now := time.Now() + // Send Data with telnet / socket communication uri := fmt.Sprintf("%s:%d", o.Host, o.Port) tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) @@ -70,32 +71,21 @@ func (o *OpenTSDB) Write(points 
[]*client.Point) error { if err != nil { return fmt.Errorf("OpenTSDB: Telnet connect fail") } + defer connection.Close() + for _, pt := range points { - metric := &MetricLine{ - Metric: fmt.Sprintf("%s%s", o.Prefix, pt.Name()), - Timestamp: timeNow.Unix(), - } - - metricValue, buildError := buildValue(pt) - if buildError != nil { - fmt.Printf("OpenTSDB: %s\n", buildError.Error()) - continue - } - metric.Value = metricValue - - tagsSlice := buildTags(pt.Tags()) - metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) - - messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags) - if o.Debug { - fmt.Print(messageLine) - } - _, err := connection.Write([]byte(messageLine)) - if err != nil { - return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) + for _, metric := range buildMetrics(pt, now, o.Prefix) { + messageLine := fmt.Sprintf("put %s %v %s %s\n", + metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + if o.Debug { + fmt.Print(messageLine) + } + _, err := connection.Write([]byte(messageLine)) + if err != nil { + return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) + } } } - defer connection.Close() return nil } @@ -111,9 +101,29 @@ func buildTags(ptTags map[string]string) []string { return tags } -func buildValue(pt *client.Point) (string, error) { +func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine { + ret := []*MetricLine{} + for fieldName, value := range pt.Fields() { + metric := &MetricLine{ + Metric: fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName), + Timestamp: now.Unix(), + } + + metricValue, buildError := buildValue(value) + if buildError != nil { + fmt.Printf("OpenTSDB: %s\n", buildError.Error()) + continue + } + metric.Value = metricValue + tagsSlice := buildTags(pt.Tags()) + metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) + ret = append(ret, metric) + } + return ret +} + +func buildValue(v interface{}) (string, error) { var retv 
string - var v = pt.Fields()["value"] switch p := v.(type) { case int64: retv = IntToString(int64(p)) From 96e54ab32630a346659c5afe34f8fbbdad969caa Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 14:35:32 -0700 Subject: [PATCH 022/103] CHANGELOG update --- CHANGELOG.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e291a58f2..703a66ad3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,9 @@ ### Release Notes - **breaking change** the `io` plugin has been renamed `diskio` - **breaking change** Plugin measurements aggregated into a single measurement. +- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters +for configuration. +- `twemproxy` plugin: `prefix` option removed. - `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been @@ -11,7 +14,9 @@ aggregated). ### Features - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags -- Added ability to specify per-plugin measurement suffix and prefix +- Added ability to specify per-plugin measurement suffix and prefix. +(`name_prefix` and `name_suffix`) +- Added ability to override base plugin name. 
(`name_override`) ### Bugfixes From f60d846eb398e8ea5d1e1229c9f0298e778988ae Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 14:55:44 -0700 Subject: [PATCH 023/103] 0.3.0 outputs: riemann --- outputs/riemann/riemann.go | 40 ++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/outputs/riemann/riemann.go b/outputs/riemann/riemann.go index 1a02216e6..eaa0aab9c 100644 --- a/outputs/riemann/riemann.go +++ b/outputs/riemann/riemann.go @@ -55,8 +55,10 @@ func (r *Riemann) Write(points []*client.Point) error { var events []*raidman.Event for _, p := range points { - ev := buildEvent(p) - events = append(events, ev) + evs := buildEvents(p) + for _, ev := range evs { + events = append(events, ev) + } } var senderr = r.client.SendMulti(events) @@ -68,24 +70,28 @@ func (r *Riemann) Write(points []*client.Point) error { return nil } -func buildEvent(p *client.Point) *raidman.Event { - host, ok := p.Tags()["host"] - if !ok { - hostname, err := os.Hostname() - if err != nil { - host = "unknown" - } else { - host = hostname +func buildEvents(p *client.Point) []*raidman.Event { + events := []*raidman.Event{} + for fieldName, value := range p.Fields() { + host, ok := p.Tags()["host"] + if !ok { + hostname, err := os.Hostname() + if err != nil { + host = "unknown" + } else { + host = hostname + } } + + event := &raidman.Event{ + Host: host, + Service: p.Name() + "_" + fieldName, + Metric: value, + } + events = append(events, event) } - var event = &raidman.Event{ - Host: host, - Service: p.Name(), - Metric: p.Fields()["value"], - } - - return event + return events } func init() { From 41374aabcbd21860103570e3967b88b711000de9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 15:29:07 -0700 Subject: [PATCH 024/103] 0.3.0 Removing internal parallelism: httpjson and exec --- plugins/exec/exec.go | 119 ++++++++--------------------------- plugins/httpjson/httpjson.go | 86 +++++++++++-------------- 
plugins/jolokia/jolokia.go | 11 ---- 3 files changed, 62 insertions(+), 154 deletions(-) diff --git a/plugins/exec/exec.go b/plugins/exec/exec.go index 1571b6bf4..87a8cc72f 100644 --- a/plugins/exec/exec.go +++ b/plugins/exec/exec.go @@ -3,13 +3,8 @@ package exec import ( "bytes" "encoding/json" - "errors" "fmt" - "math" "os/exec" - "strings" - "sync" - "time" "github.com/gonuts/go-shellquote" @@ -18,47 +13,28 @@ import ( ) const sampleConfig = ` - # specify commands via an array of tables - [[plugins.exec.commands]] # the command to run command = "/usr/bin/mycollector --foo=bar" # name of the command (used as a prefix for measurements) name = "mycollector" - - # Only run this command if it has been at least this many - # seconds since it last ran - interval = 10 ` type Exec struct { - Commands []*Command - runner Runner - clock Clock -} + Command string + Name string -type Command struct { - Command string - Name string - Interval int - lastRunAt time.Time + runner Runner } type Runner interface { - Run(*Command) ([]byte, error) -} - -type Clock interface { - Now() time.Time + Run(*Exec) ([]byte, error) } type CommandRunner struct{} -type RealClock struct{} - -func (c CommandRunner) Run(command *Command) ([]byte, error) { - command.lastRunAt = time.Now() - split_cmd, err := shellquote.Split(command.Command) +func (c CommandRunner) Run(e *Exec) ([]byte, error) { + split_cmd, err := shellquote.Split(e.Command) if err != nil || len(split_cmd) == 0 { return nil, fmt.Errorf("exec: unable to parse command, %s", err) } @@ -68,18 +44,14 @@ func (c CommandRunner) Run(command *Command) ([]byte, error) { cmd.Stdout = &out if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("exec: %s for command '%s'", err, command.Command) + return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command) } return out.Bytes(), nil } -func (c RealClock) Now() time.Time { - return time.Now() -} - func NewExec() *Exec { - return &Exec{runner: CommandRunner{}, clock: RealClock{}} + 
return &Exec{runner: CommandRunner{}} } func (e *Exec) SampleConfig() string { @@ -91,72 +63,31 @@ func (e *Exec) Description() string { } func (e *Exec) Gather(acc plugins.Accumulator) error { - var wg sync.WaitGroup - - errorChannel := make(chan error, len(e.Commands)) - - for _, c := range e.Commands { - wg.Add(1) - go func(c *Command, acc plugins.Accumulator) { - defer wg.Done() - err := e.gatherCommand(c, acc) - if err != nil { - errorChannel <- err - } - }(c, acc) + out, err := e.runner.Run(e) + if err != nil { + return err } - wg.Wait() - close(errorChannel) - - // Get all errors and return them as one giant error - errorStrings := []string{} - for err := range errorChannel { - errorStrings = append(errorStrings, err.Error()) + var jsonOut interface{} + err = json.Unmarshal(out, &jsonOut) + if err != nil { + return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", + e.Command, err) } - if len(errorStrings) == 0 { - return nil + f := internal.JSONFlattener{} + err = f.FlattenJSON("", jsonOut) + if err != nil { + return err } - return errors.New(strings.Join(errorStrings, "\n")) -} -func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error { - secondsSinceLastRun := 0.0 - - if c.lastRunAt.Unix() == 0 { // means time is uninitialized - secondsSinceLastRun = math.Inf(1) + var msrmnt_name string + if e.Name == "" { + msrmnt_name = "exec" } else { - secondsSinceLastRun = (e.clock.Now().Sub(c.lastRunAt)).Seconds() - } - - if secondsSinceLastRun >= float64(c.Interval) { - out, err := e.runner.Run(c) - if err != nil { - return err - } - - var jsonOut interface{} - err = json.Unmarshal(out, &jsonOut) - if err != nil { - return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", - c.Command, err) - } - - f := internal.JSONFlattener{} - err = f.FlattenJSON("", jsonOut) - if err != nil { - return err - } - - var msrmnt_name string - if c.Name == "" { - msrmnt_name = "exec" - } else { - msrmnt_name = "exec_" + c.Name - } - 
acc.AddFields(msrmnt_name, f.Fields, nil) + msrmnt_name = "exec_" + e.Name } + acc.AddFields(msrmnt_name, f.Fields, nil) return nil } diff --git a/plugins/httpjson/httpjson.go b/plugins/httpjson/httpjson.go index e2b44b7a9..b89f83576 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/httpjson/httpjson.go @@ -15,16 +15,12 @@ import ( ) type HttpJson struct { - Services []Service - client HTTPClient -} - -type Service struct { Name string Servers []string Method string TagKeys []string Parameters map[string]string + client HTTPClient } type HTTPClient interface { @@ -48,31 +44,28 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { } var sampleConfig = ` - # Specify services via an array of tables - [[plugins.httpjson.services]] + # a name for the service being polled + name = "webserver_stats" - # a name for the service being polled - name = "webserver_stats" + # URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] - # URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] + # HTTP method to use (case-sensitive) + method = "GET" - # HTTP method to use (case-sensitive) - method = "GET" + # List of tag names to extract from top-level of JSON server response + # tag_keys = [ + # "my_tag_1", + # "my_tag_2" + # ] - # List of tag names to extract from top-level of JSON server response - # tag_keys = [ - # "my_tag_1", - # "my_tag_2" - # ] - - # HTTP parameters (all values must be strings) - [plugins.httpjson.services.parameters] - event_type = "cpu_spike" - threshold = "0.75" + # HTTP parameters (all values must be strings) + [plugins.httpjson.parameters] + event_type = "cpu_spike" + threshold = "0.75" ` func (h *HttpJson) SampleConfig() string { @@ -87,22 +80,16 @@ func (h *HttpJson) Description() string { func (h *HttpJson) Gather(acc plugins.Accumulator) error { var wg sync.WaitGroup - 
totalServers := 0 - for _, service := range h.Services { - totalServers += len(service.Servers) - } - errorChannel := make(chan error, totalServers) + errorChannel := make(chan error, len(h.Servers)) - for _, service := range h.Services { - for _, server := range service.Servers { - wg.Add(1) - go func(service Service, server string) { - defer wg.Done() - if err := h.gatherServer(acc, service, server); err != nil { - errorChannel <- err - } - }(service, server) - } + for _, server := range h.Servers { + wg.Add(1) + go func(server string) { + defer wg.Done() + if err := h.gatherServer(acc, server); err != nil { + errorChannel <- err + } + }(server) } wg.Wait() @@ -130,10 +117,9 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error { // error: Any error that may have occurred func (h *HttpJson) gatherServer( acc plugins.Accumulator, - service Service, serverURL string, ) error { - resp, err := h.sendRequest(service, serverURL) + resp, err := h.sendRequest(serverURL) if err != nil { return err } @@ -147,7 +133,7 @@ func (h *HttpJson) gatherServer( "server": serverURL, } - for _, tag := range service.TagKeys { + for _, tag := range h.TagKeys { switch v := jsonOut[tag].(type) { case string: tags[tag] = v @@ -162,10 +148,10 @@ func (h *HttpJson) gatherServer( } var msrmnt_name string - if service.Name == "" { + if h.Name == "" { msrmnt_name = "httpjson" } else { - msrmnt_name = "httpjson_" + service.Name + msrmnt_name = "httpjson_" + h.Name } acc.AddFields(msrmnt_name, f.Fields, nil) return nil @@ -178,7 +164,7 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error) { +func (h *HttpJson) sendRequest(serverURL string) (string, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { @@ -186,21 +172,23 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error } params := 
url.Values{} - for k, v := range service.Parameters { + for k, v := range h.Parameters { params.Add(k, v) } requestURL.RawQuery = params.Encode() // Create + send request - req, err := http.NewRequest(service.Method, requestURL.String(), nil) + req, err := http.NewRequest(h.Method, requestURL.String(), nil) if err != nil { return "", err } + defer req.Body.Close() resp, err := h.client.MakeRequest(req) if err != nil { return "", err } + defer resp.Body.Close() defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) diff --git a/plugins/jolokia/jolokia.go b/plugins/jolokia/jolokia.go index c3241a892..8bebbc3c5 100644 --- a/plugins/jolokia/jolokia.go +++ b/plugins/jolokia/jolokia.go @@ -62,17 +62,6 @@ func (j *Jolokia) SampleConfig() string { [[plugins.jolokia.metrics]] name = "heap_memory_usage" jmx = "/java.lang:type=Memory/HeapMemoryUsage" - - - # This drops the 'committed' value from Eden space measurement - [[plugins.jolokia.metrics]] - name = "memory_eden" - jmx = "/java.lang:type=MemoryPool,name=PS Eden Space/Usage" - - # This passes only DaemonThreadCount and ThreadCount - [[plugins.jolokia.metrics]] - name = "heap_threads" - jmx = "/java.lang:type=Threading" ` } From e25ac0d587ef99b7910078036cc777bb04fa48b2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 15:58:57 -0700 Subject: [PATCH 025/103] 0.3.0 Removing internal parallelism: postgresql --- plugins/postgresql/postgresql.go | 61 +++++++++----------------------- 1 file changed, 16 insertions(+), 45 deletions(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 5da776cbd..eaefadb50 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -11,46 +11,32 @@ import ( _ "github.com/lib/pq" ) -type Server struct { +type Postgresql struct { Address string Databases []string OrderedColumns []string } -type Postgresql struct { - Servers []*Server -} - var ignoredColumns = map[string]bool{"datid": true, "datname": true, 
"stats_reset": true} var sampleConfig = ` - # specify servers via an array of tables - [[plugins.postgresql.servers]] - # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: # host=localhost user=pqotest password=... sslmode=... dbname=app_production # - # All connection parameters are optional. By default, the host is localhost - # and the user is the currently running user. For localhost, we default - # to sslmode=disable as well. + # All connection parameters are optional. # # Without the dbname parameter, the driver will default to a database # with the same name as the user. This dbname is just for instantiating a # connection with the server and doesn't restrict the databases we are trying # to grab metrics for. # - address = "host=localhost user=postgres sslmode=disable" # A list of databases to pull metrics about. If not specified, metrics for all # databases are gathered. - - # databases = ["app_production", "blah_testing"] - - # [[plugins.postgresql.servers]] - # address = "influx@remoteserver" + # databases = ["app_production", "testing"] ` func (p *Postgresql) SampleConfig() string { @@ -65,42 +51,27 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } -var localhost = &Server{Address: "sslmode=disable"} +var localhost = "host=localhost sslmode=disable" func (p *Postgresql) Gather(acc plugins.Accumulator) error { - if len(p.Servers) == 0 { - p.gatherServer(localhost, acc) - return nil - } - - for _, serv := range p.Servers { - err := p.gatherServer(serv, acc) - if err != nil { - return err - } - } - - return nil -} - -func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { var query string - if serv.Address == "" || serv.Address == "localhost" { - serv = localhost + if p.Address == "" || p.Address == "localhost" { + p.Address = localhost } - db, err := sql.Open("postgres", serv.Address) + db, err := 
sql.Open("postgres", p.Address) if err != nil { return err } defer db.Close() - if len(serv.Databases) == 0 { + if len(p.Databases) == 0 { query = `SELECT * FROM pg_stat_database` } else { - query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(serv.Databases, "','")) + query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, + strings.Join(p.Databases, "','")) } rows, err := db.Query(query) @@ -111,13 +82,13 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { defer rows.Close() // grab the column information from the result - serv.OrderedColumns, err = rows.Columns() + p.OrderedColumns, err = rows.Columns() if err != nil { return err } for rows.Next() { - err = p.accRow(rows, acc, serv) + err = p.accRow(rows, acc) if err != nil { return err } @@ -130,20 +101,20 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) error { +func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error { var columnVars []interface{} var dbname bytes.Buffer // this is where we'll store the column name with its *interface{} columnMap := make(map[string]*interface{}) - for _, column := range serv.OrderedColumns { + for _, column := range p.OrderedColumns { columnMap[column] = new(interface{}) } // populate the array of interface{} with the pointers in the right order for i := 0; i < len(columnMap); i++ { - columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]]) + columnVars = append(columnVars, columnMap[p.OrderedColumns[i]]) } // deconstruct array of variables and send to Scan @@ -159,7 +130,7 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) dbname.WriteString(string(dbnameChars[i])) } - tags := map[string]string{"server": serv.Address, "db": dbname.String()} + tags := map[string]string{"server": p.Address, "db": dbname.String()} fields := 
make(map[string]interface{}) for col, val := range columnMap { From c8914679b79b8b64913679bc6eec4610d93476cc Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 16:06:21 -0700 Subject: [PATCH 026/103] 0.3.0 Removing internal parallelism: procstat --- plugins/procstat/procstat.go | 59 ++++++++++++++---------------------- 1 file changed, 23 insertions(+), 36 deletions(-) diff --git a/plugins/procstat/procstat.go b/plugins/procstat/procstat.go index 1370a0003..2f171db3e 100644 --- a/plugins/procstat/procstat.go +++ b/plugins/procstat/procstat.go @@ -7,22 +7,17 @@ import ( "os/exec" "strconv" "strings" - "sync" "github.com/shirou/gopsutil/process" "github.com/influxdb/telegraf/plugins" ) -type Specification struct { +type Procstat struct { PidFile string `toml:"pid_file"` Exe string - Prefix string Pattern string -} - -type Procstat struct { - Specifications []*Specification + Prefix string } func NewProcstat() *Procstat { @@ -30,8 +25,6 @@ func NewProcstat() *Procstat { } var sampleConfig = ` - [[plugins.procstat.specifications]] - prefix = "" # optional string to prefix measurements # Must specify one of: pid_file, exe, or pattern # PID file to monitor process pid_file = "/var/run/nginx.pid" @@ -39,6 +32,9 @@ var sampleConfig = ` # exe = "nginx" # pattern as argument for pgrep (ie, pgrep -f ) # pattern = "nginx" + + # Field name prefix + prefix = "" ` func (_ *Procstat) SampleConfig() string { @@ -50,35 +46,26 @@ func (_ *Procstat) Description() string { } func (p *Procstat) Gather(acc plugins.Accumulator) error { - var wg sync.WaitGroup - - for _, specification := range p.Specifications { - wg.Add(1) - go func(spec *Specification, acc plugins.Accumulator) { - defer wg.Done() - procs, err := spec.createProcesses() - if err != nil { - log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s", - spec.Exe, spec.PidFile, spec.Pattern, err.Error()) - } else { - for _, proc := range procs { - p := NewSpecProcessor(spec.Prefix, 
acc, proc) - p.pushMetrics() - } - } - }(specification, acc) + procs, err := p.createProcesses() + if err != nil { + log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s", + p.Exe, p.PidFile, p.Pattern, err.Error()) + } else { + for _, proc := range procs { + p := NewSpecProcessor(p.Prefix, acc, proc) + p.pushMetrics() + } } - wg.Wait() return nil } -func (spec *Specification) createProcesses() ([]*process.Process, error) { +func (p *Procstat) createProcesses() ([]*process.Process, error) { var out []*process.Process var errstring string var outerr error - pids, err := spec.getAllPids() + pids, err := p.getAllPids() if err != nil { errstring += err.Error() + " " } @@ -99,16 +86,16 @@ func (spec *Specification) createProcesses() ([]*process.Process, error) { return out, outerr } -func (spec *Specification) getAllPids() ([]int32, error) { +func (p *Procstat) getAllPids() ([]int32, error) { var pids []int32 var err error - if spec.PidFile != "" { - pids, err = pidsFromFile(spec.PidFile) - } else if spec.Exe != "" { - pids, err = pidsFromExe(spec.Exe) - } else if spec.Pattern != "" { - pids, err = pidsFromPattern(spec.Pattern) + if p.PidFile != "" { + pids, err = pidsFromFile(p.PidFile) + } else if p.Exe != "" { + pids, err = pidsFromExe(p.Exe) + } else if p.Pattern != "" { + pids, err = pidsFromPattern(p.Pattern) } else { err = fmt.Errorf("Either exe, pid_file or pattern has to be specified") } From 2e764cb22da08da8ede886535100a5626c3c0e20 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 19 Dec 2015 20:26:18 -0700 Subject: [PATCH 027/103] 0.3.0 Removing internal parallelism: twemproxy and rabbitmq --- plugins/rabbitmq/rabbitmq.go | 75 ++++++++++++++-------------------- plugins/twemproxy/twemproxy.go | 61 ++++++--------------------- 2 files changed, 43 insertions(+), 93 deletions(-) diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/rabbitmq/rabbitmq.go index 7101ab431..227811bc8 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ 
b/plugins/rabbitmq/rabbitmq.go @@ -14,17 +14,13 @@ const DefaultUsername = "guest" const DefaultPassword = "guest" const DefaultURL = "http://localhost:15672" -type Server struct { +type RabbitMQ struct { URL string Name string Username string Password string Nodes []string Queues []string -} - -type RabbitMQ struct { - Servers []*Server Client *http.Client } @@ -95,15 +91,13 @@ type Node struct { SocketsUsed int64 `json:"sockets_used"` } -type gatherFunc func(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) +type gatherFunc func(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} var sampleConfig = ` - # Specify servers via an array of tables - [[plugins.rabbitmq.servers]] + url = "http://localhost:15672" # required # name = "rmq-server-1" # optional tag - # url = "http://localhost:15672" # username = "guest" # password = "guest" @@ -120,27 +114,18 @@ func (r *RabbitMQ) Description() string { return "Read metrics from one or many RabbitMQ servers via the management API" } -var localhost = &Server{URL: DefaultURL} - func (r *RabbitMQ) Gather(acc plugins.Accumulator) error { if r.Client == nil { r.Client = &http.Client{} } - var errChan = make(chan error, len(r.Servers)) + var errChan = make(chan error, len(gatherFunctions)) - // use localhost is no servers are specified in config - if len(r.Servers) == 0 { - r.Servers = append(r.Servers, localhost) + for _, f := range gatherFunctions { + go f(r, acc, errChan) } - for _, serv := range r.Servers { - for _, f := range gatherFunctions { - go f(r, serv, acc, errChan) - } - } - - for i := 1; i <= len(r.Servers)*len(gatherFunctions); i++ { + for i := 1; i <= len(gatherFunctions); i++ { err := <-errChan if err != nil { return err @@ -150,20 +135,20 @@ func (r *RabbitMQ) Gather(acc plugins.Accumulator) error { return nil } -func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error { - u = 
fmt.Sprintf("%s%s", serv.URL, u) +func (r *RabbitMQ) requestJSON(u string, target interface{}) error { + u = fmt.Sprintf("%s%s", r.URL, u) req, err := http.NewRequest("GET", u, nil) if err != nil { return err } - username := serv.Username + username := r.Username if username == "" { username = DefaultUsername } - password := serv.Password + password := r.Password if password == "" { password = DefaultPassword } @@ -182,10 +167,10 @@ func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error return nil } -func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { overview := &OverviewResponse{} - err := r.requestJSON(serv, "/api/overview", &overview) + err := r.requestJSON("/api/overview", &overview) if err != nil { errChan <- err return @@ -196,9 +181,9 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan return } - tags := map[string]string{"url": serv.URL} - if serv.Name != "" { - tags["name"] = serv.Name + tags := map[string]string{"url": r.URL} + if r.Name != "" { + tags["name"] = r.Name } fields := map[string]interface{}{ "messages": overview.QueueTotals.Messages, @@ -218,10 +203,10 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan errChan <- nil } -func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { nodes := make([]Node, 0) // Gather information about nodes - err := r.requestJSON(serv, "/api/nodes", &nodes) + err := r.requestJSON("/api/nodes", &nodes) if err != nil { errChan <- err return @@ -229,11 +214,11 @@ func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan cha now := time.Now() for _, node := range nodes { - if !shouldGatherNode(node, serv) { + if !r.shouldGatherNode(node) { continue } - tags := 
map[string]string{"url": serv.URL} + tags := map[string]string{"url": r.URL} tags["node"] = node.Name fields := map[string]interface{}{ @@ -255,21 +240,21 @@ func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan cha errChan <- nil } -func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) { +func gatherQueues(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { // Gather information about queues queues := make([]Queue, 0) - err := r.requestJSON(serv, "/api/queues", &queues) + err := r.requestJSON("/api/queues", &queues) if err != nil { errChan <- err return } for _, queue := range queues { - if !shouldGatherQueue(queue, serv) { + if !r.shouldGatherQueue(queue) { continue } tags := map[string]string{ - "url": serv.URL, + "url": r.URL, "queue": queue.Name, "vhost": queue.Vhost, "node": queue.Node, @@ -306,12 +291,12 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch errChan <- nil } -func shouldGatherNode(node Node, serv *Server) bool { - if len(serv.Nodes) == 0 { +func (r *RabbitMQ) shouldGatherNode(node Node) bool { + if len(r.Nodes) == 0 { return true } - for _, name := range serv.Nodes { + for _, name := range r.Nodes { if name == node.Name { return true } @@ -320,12 +305,12 @@ func shouldGatherNode(node Node, serv *Server) bool { return false } -func shouldGatherQueue(queue Queue, serv *Server) bool { - if len(serv.Queues) == 0 { +func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool { + if len(r.Queues) == 0 { return true } - for _, name := range serv.Queues { + for _, name := range r.Queues { if name == queue.Name { return true } diff --git a/plugins/twemproxy/twemproxy.go b/plugins/twemproxy/twemproxy.go index 1933e8d0d..fe3fb6de5 100644 --- a/plugins/twemproxy/twemproxy.go +++ b/plugins/twemproxy/twemproxy.go @@ -5,28 +5,21 @@ import ( "errors" "io/ioutil" "net" - "strings" - "sync" "time" "github.com/influxdb/telegraf/plugins" ) type Twemproxy struct { - 
Instances []TwemproxyInstance -} - -type TwemproxyInstance struct { Addr string Pools []string } var sampleConfig = ` - [[plugins.twemproxy.instances]] - # Twemproxy stats address and port (no scheme) - addr = "localhost:22222" - # Monitor pool name - pools = ["redis_pool", "mc_pool"] + # Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + # Monitor pool name + pools = ["redis_pool", "mc_pool"] ` func (t *Twemproxy) SampleConfig() string { @@ -39,35 +32,7 @@ func (t *Twemproxy) Description() string { // Gather data from all Twemproxy instances func (t *Twemproxy) Gather(acc plugins.Accumulator) error { - var wg sync.WaitGroup - errorChan := make(chan error, len(t.Instances)) - for _, inst := range t.Instances { - wg.Add(1) - go func(inst TwemproxyInstance) { - defer wg.Done() - if err := inst.Gather(acc); err != nil { - errorChan <- err - } - }(inst) - } - wg.Wait() - - close(errorChan) - errs := []string{} - for err := range errorChan { - errs = append(errs, err.Error()) - } - if len(errs) == 0 { - return nil - } - return errors.New(strings.Join(errs, "\n")) -} - -// Gather data from one Twemproxy -func (ti *TwemproxyInstance) Gather( - acc plugins.Accumulator, -) error { - conn, err := net.DialTimeout("tcp", ti.Addr, 1*time.Second) + conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second) if err != nil { return err } @@ -82,14 +47,14 @@ func (ti *TwemproxyInstance) Gather( } tags := make(map[string]string) - tags["twemproxy"] = ti.Addr - ti.processStat(acc, tags, stats) + tags["twemproxy"] = t.Addr + t.processStat(acc, tags, stats) return nil } // Process Twemproxy server stats -func (ti *TwemproxyInstance) processStat( +func (t *Twemproxy) processStat( acc plugins.Accumulator, tags map[string]string, data map[string]interface{}, @@ -111,19 +76,19 @@ func (ti *TwemproxyInstance) processStat( } acc.AddFields("twemproxy", fields, tags) - for _, pool := range ti.Pools { + for _, pool := range t.Pools { if poolStat, ok := data[pool]; ok { if 
data, ok := poolStat.(map[string]interface{}); ok { poolTags := copyTags(tags) poolTags["pool"] = pool - ti.processPool(acc, poolTags, data) + t.processPool(acc, poolTags, data) } } } } // Process pool data in Twemproxy stats -func (ti *TwemproxyInstance) processPool( +func (t *Twemproxy) processPool( acc plugins.Accumulator, tags map[string]string, data map[string]interface{}, @@ -143,7 +108,7 @@ func (ti *TwemproxyInstance) processPool( serverTags[key] = copyTags(tags) serverTags[key]["server"] = key } - ti.processServer(acc, serverTags[key], data) + t.processServer(acc, serverTags[key], data) } } } @@ -151,7 +116,7 @@ func (ti *TwemproxyInstance) processPool( } // Process backend server(redis/memcached) stats -func (ti *TwemproxyInstance) processServer( +func (t *Twemproxy) processServer( acc plugins.Accumulator, tags map[string]string, data map[string]interface{}, From 4bd5b6a4d6d3e822eef2805c3da292ecfc399c52 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 21 Dec 2015 13:42:32 -0800 Subject: [PATCH 028/103] Fix httpjson panic for nil request body --- plugins/httpjson/httpjson.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/httpjson/httpjson.go b/plugins/httpjson/httpjson.go index b89f83576..40a771a91 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/httpjson/httpjson.go @@ -182,7 +182,6 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) { if err != nil { return "", err } - defer req.Body.Close() resp, err := h.client.MakeRequest(req) if err != nil { From 498482d0f69d6992437aa977cf5b117f7dfa825f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Jan 2016 16:58:35 -0700 Subject: [PATCH 029/103] 0.3.0 unit tests: system plugins --- plugins/system/cpu_test.go | 118 +++++++++++++++------- plugins/system/disk_test.go | 182 +++++++++++++++++----------------- plugins/system/docker_test.go | 78 ++++++++------- plugins/system/memory_test.go | 41 ++++---- plugins/system/net.go | 4 +- plugins/system/net_test.go | 48 
++++++--- plugins/system/ps.go | 52 ---------- testutil/accumulator.go | 141 ++++++++------------------ 8 files changed, 311 insertions(+), 353 deletions(-) diff --git a/plugins/system/cpu_test.go b/plugins/system/cpu_test.go index 843d166cb..c85734adc 100644 --- a/plugins/system/cpu_test.go +++ b/plugins/system/cpu_test.go @@ -1,6 +1,7 @@ package system import ( + "fmt" "testing" "github.com/influxdb/telegraf/testutil" @@ -52,23 +53,19 @@ func TestCPUStats(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - numCPUPoints := len(acc.Points) - - expectedCPUPoints := 10 - assert.Equal(t, expectedCPUPoints, numCPUPoints) // Computed values are checked with delta > 0 becasue of floating point arithmatic // imprecision - assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 3.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", 
"time_steal", 0.0511, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 8.1, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) mps2 := MockPS{} mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil) @@ -78,29 +75,74 @@ func TestCPUStats(t *testing.T) { err = cs.Gather(&acc) require.NoError(t, err) - numCPUPoints = len(acc.Points) - numCPUPoints - expectedCPUPoints = 20 - assert.Equal(t, expectedCPUPoints, numCPUPoints) + assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 11.4, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 158.8699, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 2.5, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.7, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 12.9, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, 
"usage_user", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 8.3, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 78.7699, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 1.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.5, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 1.1, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 4.8, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) +} + +// Asserts that a given accumulator contains a measurment of type float64 with +// specific tags within a certain distance of a given expected value. 
Asserts a failure +// if the measurement is of the wrong type, or if no matching measurements are found +// +// Paramaters: +// t *testing.T : Testing object to use +// acc testutil.Accumulator: Accumulator to examine +// measurement string : Name of the measurement to examine +// expectedValue float64 : Value to search for within the measurement +// delta float64 : Maximum acceptable distance of an accumulated value +// from the expectedValue parameter. Useful when +// floating-point arithmatic imprecision makes looking +// for an exact match impractical +// tags map[string]string : Tag set the found measurement must have. Set to nil to +// ignore the tag set. +func assertContainsTaggedFloat( + t *testing.T, + acc *testutil.Accumulator, + measurement string, + field string, + expectedValue float64, + delta float64, + tags map[string]string, +) { + var actualValue float64 + for _, pt := range acc.Points { + if pt.Measurement == measurement { + for fieldname, value := range pt.Fields { + if fieldname == field { + if value, ok := value.(float64); ok { + actualValue = value + if (value >= expectedValue-delta) && (value <= expectedValue+delta) { + // Found the point, return without failing + return + } + } else { + assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", + measurement)) + } + } + } + } + } + msg := fmt.Sprintf( + "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", + measurement, delta, expectedValue, actualValue) + assert.Fail(t, msg) } diff --git a/plugins/system/disk_test.go b/plugins/system/disk_test.go index abeba736b..6ad7b171b 100644 --- a/plugins/system/disk_test.go +++ b/plugins/system/disk_test.go @@ -39,7 +39,7 @@ func TestDiskStats(t *testing.T) { err = (&DiskStats{ps: &mps}).Gather(&acc) require.NoError(t, err) - numDiskPoints := len(acc.Points) + numDiskPoints := acc.NFields() expectedAllDiskPoints := 12 assert.Equal(t, expectedAllDiskPoints, numDiskPoints) @@ -52,110 +52,114 @@ func 
TestDiskStats(t *testing.T) { "fstype": "ext4", } - assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1)) - assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1)) - assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1)) - assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2)) - assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2)) - assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2)) - assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2)) + fields1 := map[string]interface{}{ + "total": uint64(128), //tags1) + "used": uint64(105), //tags1) + "free": uint64(23), //tags1) + "inodes_total": uint64(1234), //tags1) + "inodes_free": uint64(234), //tags1) + "inodes_used": uint64(1000), //tags1) + } + fields2 := map[string]interface{}{ + "total": uint64(256), //tags2) + "used": uint64(210), //tags2) + "free": uint64(46), //tags2) + "inodes_total": uint64(2468), //tags2) + "inodes_free": uint64(468), //tags2) + "inodes_used": uint64(2000), //tags2) + } + acc.AssertContainsFields(t, "disk", fields1, tags1) + acc.AssertContainsFields(t, "disk", fields2, tags2) // We expect 6 more DiskPoints to show up with an explicit match on "/" // and /home not matching the /dev in Mountpoints err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskPoints+6, len(acc.Points)) + assert.Equal(t, expectedAllDiskPoints+6, acc.NFields()) // We should see all the diskpoints as Mountpoints includes both // / and /home err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", 
"/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskPoints+6, len(acc.Points)) - + assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields()) } -func TestDiskIOStats(t *testing.T) { - var mps MockPS - defer mps.AssertExpectations(t) - var acc testutil.Accumulator - var err error +// func TestDiskIOStats(t *testing.T) { +// var mps MockPS +// defer mps.AssertExpectations(t) +// var acc testutil.Accumulator +// var err error - diskio1 := disk.DiskIOCountersStat{ +// diskio1 := disk.DiskIOCountersStat{ +// ReadCount: 888, +// WriteCount: 5341, +// ReadBytes: 100000, +// WriteBytes: 200000, +// ReadTime: 7123, +// WriteTime: 9087, +// Name: "sda1", +// IoTime: 123552, +// SerialNumber: "ab-123-ad", +// } +// diskio2 := disk.DiskIOCountersStat{ +// ReadCount: 444, +// WriteCount: 2341, +// ReadBytes: 200000, +// WriteBytes: 400000, +// ReadTime: 3123, +// WriteTime: 6087, +// Name: "sdb1", +// IoTime: 246552, +// SerialNumber: "bb-123-ad", +// } - ReadCount: 888, - WriteCount: 5341, - ReadBytes: 100000, - WriteBytes: 200000, - ReadTime: 7123, - WriteTime: 9087, - Name: "sda1", - IoTime: 123552, - SerialNumber: "ab-123-ad", - } - diskio2 := disk.DiskIOCountersStat{ - ReadCount: 444, - WriteCount: 2341, - ReadBytes: 200000, - WriteBytes: 400000, - ReadTime: 3123, - WriteTime: 6087, - Name: "sdb1", - IoTime: 246552, - SerialNumber: "bb-123-ad", - } +// mps.On("DiskIO").Return( +// map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, +// nil) - mps.On("DiskIO").Return( - map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, - nil) +// err = (&DiskIOStats{ps: &mps}).Gather(&acc) +// require.NoError(t, err) - err = (&DiskIOStats{ps: &mps}).Gather(&acc) - require.NoError(t, err) +// numDiskIOPoints := acc.NFields() +// expectedAllDiskIOPoints := 14 +// assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) - numDiskIOPoints := len(acc.Points) - expectedAllDiskIOPoints := 14 - assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) 
+// dtags1 := map[string]string{ +// "name": "sda1", +// "serial": "ab-123-ad", +// } +// dtags2 := map[string]string{ +// "name": "sdb1", +// "serial": "bb-123-ad", +// } - dtags1 := map[string]string{ - "name": "sda1", - "serial": "ab-123-ad", - } - dtags2 := map[string]string{ - "name": "sdb1", - "serial": "bb-123-ad", - } +// assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) +// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) +// assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) +// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) +// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) +// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) - assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1)) - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2)) - assert.True(t, 
acc.CheckTaggedValue("writes", uint64(2341), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) +// // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" +// // and serial should be missing from the tags with SkipSerialNumber set +// err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) +// assert.Equal(t, expectedAllDiskIOPoints+7, acc.NFields()) - // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" - // and serial should be missing from the tags with SkipSerialNumber set - err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) - assert.Equal(t, expectedAllDiskIOPoints+7, len(acc.Points)) +// dtags3 := map[string]string{ +// "name": "sdb1", +// } - dtags3 := map[string]string{ - "name": "sdb1", - } - - assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) - assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) - assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) - assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) - assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) -} +// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3)) +// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3)) +// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3)) +// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3)) 
+// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3)) +// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3)) +// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3)) +// } diff --git a/plugins/system/docker_test.go b/plugins/system/docker_test.go index 1fbf76d10..5bfcf986e 100644 --- a/plugins/system/docker_test.go +++ b/plugins/system/docker_test.go @@ -75,42 +75,46 @@ func TestDockerStats_GenerateStats(t *testing.T) { "command": "", } - assert.True(t, acc.CheckTaggedValue("user", 3.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("system", 8.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("idle", 80.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("nice", 1.3, dockertags)) - assert.True(t, acc.CheckTaggedValue("iowait", 0.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("irq", 0.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("softirq", 0.11, dockertags)) - assert.True(t, acc.CheckTaggedValue("steal", 0.0001, dockertags)) - assert.True(t, acc.CheckTaggedValue("guest", 8.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("guest_nice", 0.324, dockertags)) + fields := map[string]interface{}{ + "user": 3.1, + "system": 8.2, + "idle": 80.1, + "nice": 1.3, + "iowait": 0.2, + "irq": 0.1, + "softirq": 0.11, + "steal": 0.0001, + "guest": 8.1, + "guest_nice": 0.324, - assert.True(t, acc.CheckTaggedValue("cache", uint64(1), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss", uint64(2), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss_huge", uint64(3), dockertags)) - assert.True(t, acc.CheckTaggedValue("mapped_file", uint64(4), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_in", uint64(5), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_out", uint64(6), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_fault", uint64(7), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_major_fault", uint64(8), dockertags)) - assert.True(t, 
acc.CheckTaggedValue("inactive_anon", uint64(9), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_anon", uint64(10), dockertags)) - assert.True(t, acc.CheckTaggedValue("inactive_file", uint64(11), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_file", uint64(12), dockertags)) - assert.True(t, acc.CheckTaggedValue("unevictable", uint64(13), dockertags)) - assert.True(t, acc.CheckTaggedValue("memory_limit", uint64(14), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_cache", uint64(15), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss", uint64(16), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss_huge", uint64(17), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_mapped_file", uint64(18), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_in", uint64(19), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_out", uint64(20), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_fault", uint64(21), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_major_fault", uint64(22), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_anon", uint64(23), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_anon", uint64(24), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_file", uint64(25), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_file", uint64(26), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_unevictable", uint64(27), dockertags)) + "cache": uint64(1), + "rss": uint64(2), + "rss_huge": uint64(3), + "mapped_file": uint64(4), + "swap_in": uint64(5), + "swap_out": uint64(6), + "page_fault": uint64(7), + "page_major_fault": uint64(8), + "inactive_anon": uint64(9), + "active_anon": uint64(10), + "inactive_file": uint64(11), + "active_file": uint64(12), + "unevictable": uint64(13), + "memory_limit": uint64(14), + "total_cache": uint64(15), + "total_rss": uint64(16), + "total_rss_huge": 
uint64(17), + "total_mapped_file": uint64(18), + "total_swap_in": uint64(19), + "total_swap_out": uint64(20), + "total_page_fault": uint64(21), + "total_page_major_fault": uint64(22), + "total_inactive_anon": uint64(23), + "total_active_anon": uint64(24), + "total_inactive_file": uint64(25), + "total_active_file": uint64(26), + "total_unevictable": uint64(27), + } + + acc.AssertContainsFields(t, "docker", fields, dockertags) } diff --git a/plugins/system/memory_test.go b/plugins/system/memory_test.go index 4b97501a9..ca4a07bae 100644 --- a/plugins/system/memory_test.go +++ b/plugins/system/memory_test.go @@ -5,7 +5,6 @@ import ( "github.com/influxdb/telegraf/testutil" "github.com/shirou/gopsutil/mem" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -44,30 +43,30 @@ func TestMemStats(t *testing.T) { err = (&MemStats{&mps}).Gather(&acc) require.NoError(t, err) - vmtags := map[string]string(nil) - - assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags)) - assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags)) - assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags)) - assert.True(t, acc.CheckTaggedValue("available_percent", - float64(7600)/float64(12400)*100, - vmtags)) - assert.True(t, acc.CheckTaggedValue("used_percent", - float64(5000)/float64(12400)*100, - vmtags)) - assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags)) + memfields := map[string]interface{}{ + "total": uint64(12400), + "available": uint64(7600), + "used": uint64(5000), + "available_percent": float64(7600) / float64(12400) * 100, + "used_percent": float64(5000) / float64(12400) * 100, + "free": uint64(1235), + "cached": uint64(0), + "buffered": uint64(0), + } + acc.AssertContainsFields(t, "mem", memfields, nil) acc.Points = nil err = (&SwapStats{&mps}).Gather(&acc) require.NoError(t, err) - swaptags := map[string]string(nil) - - assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags)) - 
assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags)) - assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags)) + swapfields := map[string]interface{}{ + "total": uint64(8123), + "used": uint64(1232), + "used_percent": float64(12.2), + "free": uint64(6412), + "in": uint64(7), + "out": uint64(830), + } + acc.AssertContainsFields(t, "swap", swapfields, nil) } diff --git a/plugins/system/net.go b/plugins/system/net.go index 23f856d6d..72c450222 100644 --- a/plugins/system/net.go +++ b/plugins/system/net.go @@ -86,13 +86,15 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error { // Get system wide stats for different network protocols // (ignore these stats if the call fails) netprotos, _ := s.ps.NetProto() + fields := make(map[string]interface{}) for _, proto := range netprotos { for stat, value := range proto.Stats { name := fmt.Sprintf("%s_%s", strings.ToLower(proto.Protocol), strings.ToLower(stat)) - acc.Add(name, value, nil) + fields[name] = value } } + acc.AddFields("net", fields, nil) return nil } diff --git a/plugins/system/net_test.go b/plugins/system/net_test.go index 042b6a2fb..ee6010580 100644 --- a/plugins/system/net_test.go +++ b/plugins/system/net_test.go @@ -6,7 +6,6 @@ import ( "github.com/influxdb/telegraf/testutil" "github.com/shirou/gopsutil/net" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -65,24 +64,43 @@ func TestNetStats(t *testing.T) { "interface": "eth0", } - assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags)) - assert.NoError(t, 
acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags)) - assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags)) - assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592))) - assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655))) + fields1 := map[string]interface{}{ + "bytes_sent": uint64(1123), + "bytes_recv": uint64(8734422), + "packets_sent": uint64(781), + "packets_recv": uint64(23456), + "err_in": uint64(832), + "err_out": uint64(8), + "drop_in": uint64(7), + "drop_out": uint64(1), + } + acc.AssertContainsFields(t, "net", fields1, ntags) + + fields2 := map[string]interface{}{ + "udp_noports": int64(892592), + "udp_indatagrams": int64(4655), + } + acc.AssertContainsFields(t, "net", fields2, nil) acc.Points = nil err = (&NetStats{&mps}).Gather(&acc) require.NoError(t, err) - netstattags := map[string]string(nil) - assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags)) - assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags)) + fields3 := map[string]interface{}{ + "tcp_established": 2, + "tcp_syn_sent": 0, + "tcp_syn_recv": 0, + "tcp_fin_wait1": 0, + "tcp_fin_wait2": 0, + "tcp_time_wait": 0, + "tcp_close": 1, + "tcp_close_wait": 0, + "tcp_last_ack": 0, + "tcp_listen": 0, + "tcp_closing": 0, + "tcp_none": 0, + "udp_socket": 1, + } + acc.AssertContainsFields(t, "netstat", fields3, nil) } diff --git a/plugins/system/ps.go b/plugins/system/ps.go index 0b7a38527..d0c35c62c 100644 --- a/plugins/system/ps.go +++ b/plugins/system/ps.go @@ -1,16 +1,12 @@ package system import ( - "fmt" gonet "net" "os" - "reflect" "strings" - "testing" "github.com/influxdb/telegraf/internal" 
"github.com/influxdb/telegraf/plugins" - "github.com/influxdb/telegraf/testutil" dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" @@ -18,8 +14,6 @@ import ( "github.com/shirou/gopsutil/docker" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" - - "github.com/stretchr/testify/assert" ) type DockerContainerStat struct { @@ -172,49 +166,3 @@ func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) { return stats, nil } - -// Asserts that a given accumulator contains a measurment of type float64 with -// specific tags within a certain distance of a given expected value. Asserts a failure -// if the measurement is of the wrong type, or if no matching measurements are found -// -// Paramaters: -// t *testing.T : Testing object to use -// acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine -// expectedValue float64 : Value to search for within the measurement -// delta float64 : Maximum acceptable distance of an accumulated value -// from the expectedValue parameter. Useful when -// floating-point arithmatic imprecision makes looking -// for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. 
-func assertContainsTaggedFloat( - t *testing.T, - acc *testutil.Accumulator, - measurement string, - expectedValue float64, - delta float64, - tags map[string]string, -) { - var actualValue float64 - for _, pt := range acc.Points { - if pt.Measurement == measurement { - if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) { - if value, ok := pt.Fields["value"].(float64); ok { - actualValue = value - if (value >= expectedValue-delta) && (value <= expectedValue+delta) { - // Found the point, return without failing - return - } - } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) - } - - } - } - } - msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", - measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) -} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index d31c71ef5..256c4e105 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -4,7 +4,10 @@ import ( "fmt" "reflect" "sync" + "testing" "time" + + "github.com/stretchr/testify/assert" ) // Point defines a single point measurement @@ -106,70 +109,26 @@ func (a *Accumulator) Get(measurement string) (*Point, bool) { return nil, false } -// CheckValue calls CheckFieldsValue passing a single-value map as fields -func (a *Accumulator) CheckValue(measurement string, val interface{}) bool { - return a.CheckFieldsValue(measurement, map[string]interface{}{"value": val}) -} - -// CheckValue checks that the accumulators point for the given measurement -// is the same as the given value. 
-func (a *Accumulator) CheckFieldsValue(measurement string, fields map[string]interface{}) bool { - for _, p := range a.Points { - if p.Measurement == measurement { - if reflect.DeepEqual(fields, p.Fields) { - return true - } else { - fmt.Printf("Measurement %s Failure, expected: %v, got %v\n", - measurement, fields, p.Fields) - return false - } +// NFields returns the total number of fields in the accumulator, across all +// measurements +func (a *Accumulator) NFields() int { + counter := 0 + for _, pt := range a.Points { + for _, _ = range pt.Fields { + counter++ } } - fmt.Printf("Measurement %s, fields %s not found\n", measurement, fields) - return false + return counter } -// CheckTaggedValue calls ValidateTaggedValue -func (a *Accumulator) CheckTaggedValue( - measurement string, - val interface{}, - tags map[string]string, -) bool { - return a.ValidateTaggedValue(measurement, val, tags) == nil -} - -// ValidateTaggedValue calls ValidateTaggedFieldsValue passing a single-value map as fields -func (a *Accumulator) ValidateTaggedValue( - measurement string, - val interface{}, - tags map[string]string, -) error { - return a.ValidateTaggedFieldsValue(measurement, map[string]interface{}{"value": val}, tags) -} - -// ValidateValue calls ValidateTaggedValue -func (a *Accumulator) ValidateValue(measurement string, val interface{}) error { - return a.ValidateTaggedValue(measurement, val, nil) -} - -// CheckTaggedFieldsValue calls ValidateTaggedFieldsValue -func (a *Accumulator) CheckTaggedFieldsValue( +func (a *Accumulator) AssertContainsFields( + t *testing.T, measurement string, fields map[string]interface{}, tags map[string]string, -) bool { - return a.ValidateTaggedFieldsValue(measurement, fields, tags) == nil -} - -// ValidateTaggedValue validates that the given measurement and value exist -// in the accumulator and with the given tags. 
-func (a *Accumulator) ValidateTaggedFieldsValue( - measurement string, - fields map[string]interface{}, - tags map[string]string, -) error { +) { if tags == nil { - tags = map[string]string{} + tags = make(map[string]string) } for _, p := range a.Points { if !reflect.DeepEqual(tags, p.Tags) { @@ -178,53 +137,27 @@ func (a *Accumulator) ValidateTaggedFieldsValue( if p.Measurement == measurement { if !reflect.DeepEqual(fields, p.Fields) { - return fmt.Errorf("%v != %v ", fields, p.Fields) - } - return nil - } - } - - return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags) -} - -// ValidateFieldsValue calls ValidateTaggedFieldsValue -func (a *Accumulator) ValidateFieldsValue( - measurement string, - fields map[string]interface{}, -) error { - return a.ValidateTaggedValue(measurement, fields, nil) -} - -func (a *Accumulator) ValidateTaggedFields( - measurement string, - fields map[string]interface{}, - tags map[string]string, -) error { - if tags == nil { - tags = map[string]string{} - } - for _, p := range a.Points { - if !reflect.DeepEqual(tags, p.Tags) { - continue - } - - if p.Measurement == measurement { - if !reflect.DeepEqual(fields, p.Fields) { - return fmt.Errorf("%v (%T) != %v (%T)", + msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)", p.Fields, p.Fields, fields, fields) + assert.Fail(t, msg) } - return nil + return } } - return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags) + msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags) + assert.Fail(t, msg) } // HasIntValue returns true if the measurement has an Int value -func (a *Accumulator) HasIntValue(measurement string) bool { +func (a *Accumulator) HasIntField(measurement string, field string) bool { for _, p := range a.Points { if p.Measurement == measurement { - _, ok := p.Fields["value"].(int64) - return ok + for fieldname, value := range p.Fields { + if fieldname == field { + _, ok := value.(int64) + return ok + } + } } } @@ 
-232,11 +165,15 @@ func (a *Accumulator) HasIntValue(measurement string) bool { } // HasUIntValue returns true if the measurement has a UInt value -func (a *Accumulator) HasUIntValue(measurement string) bool { +func (a *Accumulator) HasUIntField(measurement string, field string) bool { for _, p := range a.Points { if p.Measurement == measurement { - _, ok := p.Fields["value"].(uint64) - return ok + for fieldname, value := range p.Fields { + if fieldname == field { + _, ok := value.(uint64) + return ok + } + } } } @@ -244,11 +181,15 @@ func (a *Accumulator) HasUIntValue(measurement string) bool { } // HasFloatValue returns true if the given measurement has a float value -func (a *Accumulator) HasFloatValue(measurement string) bool { +func (a *Accumulator) HasFloatField(measurement string, field string) bool { for _, p := range a.Points { if p.Measurement == measurement { - _, ok := p.Fields["value"].(float64) - return ok + for fieldname, value := range p.Fields { + if fieldname == field { + _, ok := value.(float64) + return ok + } + } } } From 2e20fc413c9501e8b626c8c110a4ed8bf5310128 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Jan 2016 17:28:15 -0700 Subject: [PATCH 030/103] 0.3.0 unit tests: aerospike, apache, bcache --- plugins/aerospike/aerospike_test.go | 104 ++++++++++++++-------------- plugins/apache/apache_test.go | 59 +++++++--------- plugins/bcache/bcache_test.go | 96 +++++++++---------------- plugins/system/disk_test.go | 4 +- plugins/system/docker_test.go | 2 +- plugins/system/memory_test.go | 4 +- plugins/system/net_test.go | 6 +- testutil/accumulator.go | 24 +++++-- 8 files changed, 139 insertions(+), 160 deletions(-) diff --git a/plugins/aerospike/aerospike_test.go b/plugins/aerospike/aerospike_test.go index 532ebaafb..1345bdc91 100644 --- a/plugins/aerospike/aerospike_test.go +++ b/plugins/aerospike/aerospike_test.go @@ -4,7 +4,6 @@ import ( "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" - "reflect" "testing" ) @@ -31,7 +30,7 @@ func TestAerospikeStatistics(t *testing.T) { } for _, metric := range asMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + assert.True(t, acc.HasIntField("aerospike", metric), metric) } } @@ -49,64 +48,67 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) { "stat_read_reqs": "12345", } readAerospikeStats(stats, &acc, "host1", "") - for k := range stats { - if k == "stat-write-errs" { - k = "stat_write_errs" - } - assert.True(t, acc.HasMeasurement(k)) - assert.True(t, acc.CheckValue(k, int64(12345))) - } -} -func TestReadAerospikeStatsNamespace(t *testing.T) { - var acc testutil.Accumulator - stats := map[string]string{ - "stat_write_errs": "12345", - "stat_read_reqs": "12345", + fields := map[string]interface{}{ + "stat_write_errs": int64(12345), + "stat_read_reqs": int64(12345), } - readAerospikeStats(stats, &acc, "host1", "test") - tags := map[string]string{ "aerospike_host": "host1", - "namespace": "test", - } - for k := range stats { - assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil) + "namespace": "_service", } + acc.AssertContainsTaggedFields(t, "aerospike", fields, tags) } -func TestAerospikeUnmarshalList(t *testing.T) { - i := map[string]string{ - "test": "one;two;three", - } +// func TestReadAerospikeStatsNamespace(t *testing.T) { +// var acc testutil.Accumulator +// stats := map[string]string{ +// "stat_write_errs": "12345", +// "stat_read_reqs": "12345", +// } +// readAerospikeStats(stats, &acc, "host1", "test") - expected := []string{"one", "two", "three"} +// tags := map[string]string{ +// "aerospike_host": "host1", +// "namespace": "test", +// } +// for k := range stats { +// assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil) +// } +// } - list, err := unmarshalListInfo(i, "test2") - assert.True(t, err != nil) +// func TestAerospikeUnmarshalList(t *testing.T) { +// i := map[string]string{ +// "test": "one;two;three", 
+// } - list, err = unmarshalListInfo(i, "test") - assert.True(t, err == nil) - equal := true - for ix := range expected { - if list[ix] != expected[ix] { - equal = false - break - } - } - assert.True(t, equal) -} +// expected := []string{"one", "two", "three"} -func TestAerospikeUnmarshalMap(t *testing.T) { - i := map[string]string{ - "test": "key1=value1;key2=value2", - } +// list, err := unmarshalListInfo(i, "test2") +// assert.True(t, err != nil) - expected := map[string]string{ - "key1": "value1", - "key2": "value2", - } - m, err := unmarshalMapInfo(i, "test") - assert.True(t, err == nil) - assert.True(t, reflect.DeepEqual(m, expected)) -} +// list, err = unmarshalListInfo(i, "test") +// assert.True(t, err == nil) +// equal := true +// for ix := range expected { +// if list[ix] != expected[ix] { +// equal = false +// break +// } +// } +// assert.True(t, equal) +// } + +// func TestAerospikeUnmarshalMap(t *testing.T) { +// i := map[string]string{ +// "test": "key1=value1;key2=value2", +// } + +// expected := map[string]string{ +// "key1": "value1", +// "key2": "value2", +// } +// m, err := unmarshalMapInfo(i, "test") +// assert.True(t, err == nil) +// assert.True(t, reflect.DeepEqual(m, expected)) +// } diff --git a/plugins/apache/apache_test.go b/plugins/apache/apache_test.go index 9688302ac..16c319974 100644 --- a/plugins/apache/apache_test.go +++ b/plugins/apache/apache_test.go @@ -8,7 +8,6 @@ import ( "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -44,37 +43,31 @@ func TestHTTPApache(t *testing.T) { err := a.Gather(&acc) require.NoError(t, err) - testInt := []struct { - measurement string - value float64 - }{ - {"TotalAccesses", 1.29811861e+08}, - {"TotalkBytes", 5.213701865e+09}, - {"CPULoad", 6.51929}, - {"Uptime", 941553}, - {"ReqPerSec", 137.87}, - {"BytesPerSec", 5.67024e+06}, - {"BytesPerReq", 41127.4}, - {"BusyWorkers", 270}, - {"IdleWorkers", 630}, - {"ConnsTotal", 1451}, - 
{"ConnsAsyncWriting", 32}, - {"ConnsAsyncKeepAlive", 945}, - {"ConnsAsyncClosing", 205}, - {"scboard_waiting", 630}, - {"scboard_starting", 0}, - {"scboard_reading", 157}, - {"scboard_sending", 113}, - {"scboard_keepalive", 0}, - {"scboard_dnslookup", 0}, - {"scboard_closing", 0}, - {"scboard_logging", 0}, - {"scboard_finishing", 0}, - {"scboard_idle_cleanup", 0}, - {"scboard_open", 2850}, - } - - for _, test := range testInt { - assert.True(t, acc.CheckValue(test.measurement, test.value)) + fields := map[string]interface{}{ + "TotalAccesses": float64(1.29811861e+08), + "TotalkBytes": float64(5.213701865e+09), + "CPULoad": float64(6.51929), + "Uptime": float64(941553), + "ReqPerSec": float64(137.87), + "BytesPerSec": float64(5.67024e+06), + "BytesPerReq": float64(41127.4), + "BusyWorkers": float64(270), + "IdleWorkers": float64(630), + "ConnsTotal": float64(1451), + "ConnsAsyncWriting": float64(32), + "ConnsAsyncKeepAlive": float64(945), + "ConnsAsyncClosing": float64(205), + "scboard_waiting": float64(630), + "scboard_starting": float64(0), + "scboard_reading": float64(157), + "scboard_sending": float64(113), + "scboard_keepalive": float64(0), + "scboard_dnslookup": float64(0), + "scboard_closing": float64(0), + "scboard_logging": float64(0), + "scboard_finishing": float64(0), + "scboard_idle_cleanup": float64(0), + "scboard_open": float64(2850), } + acc.AssertContainsFields(t, "apache", fields) } diff --git a/plugins/bcache/bcache_test.go b/plugins/bcache/bcache_test.go index b2b83bfec..0f34d016b 100644 --- a/plugins/bcache/bcache_test.go +++ b/plugins/bcache/bcache_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,11 +28,6 @@ var ( testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10" ) -type metrics struct { - name string - value uint64 -} - func TestBcacheGeneratesMetrics(t *testing.T) { err := 
os.MkdirAll(testBcacheUuidPath, 0755) require.NoError(t, err) @@ -53,70 +47,52 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", []byte(dirty_data), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", + []byte(dirty_data), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", + []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cache_bypass_hits), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", + []byte(cache_bypass_hits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cache_bypass_misses), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", + []byte(cache_bypass_misses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cache_hit_ratio), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", + []byte(cache_hit_ratio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", []byte(cache_hits), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", + []byte(cache_hits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cache_miss_collisions), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", + []byte(cache_miss_collisions), 0644) require.NoError(t, err) - err = 
ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", []byte(cache_misses), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", + []byte(cache_misses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", []byte(cache_readaheads), 0644) + err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", + []byte(cache_readaheads), 0644) require.NoError(t, err) - intMetrics := []*metrics{ - { - name: "dirty_data", - value: 1610612736, - }, - { - name: "bypassed", - value: 5167704440832, - }, - { - name: "cache_bypass_hits", - value: 146155333, - }, - { - name: "cache_bypass_misses", - value: 0, - }, - { - name: "cache_hit_ratio", - value: 90, - }, - { - name: "cache_hits", - value: 511469583, - }, - { - name: "cache_miss_collisions", - value: 157567, - }, - { - name: "cache_misses", - value: 50616331, - }, - { - name: "cache_readaheads", - value: 2, - }, + fields := map[string]interface{}{ + "dirty_data": uint64(1610612736), + "bypassed": uint64(5167704440832), + "cache_bypass_hits": uint64(146155333), + "cache_bypass_misses": uint64(0), + "cache_hit_ratio": uint64(90), + "cache_hits": uint64(511469583), + "cache_miss_collisions": uint64(157567), + "cache_misses": uint64(50616331), + "cache_readaheads": uint64(2), } tags := map[string]string{ @@ -126,27 +102,19 @@ func TestBcacheGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - //all devs + // all devs b := &Bcache{BcachePath: testBcachePath} err = b.Gather(&acc) require.NoError(t, err) + acc.AssertContainsTaggedFields(t, "bcache", fields, tags) - for _, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } - - //one exist dev + // one exist dev b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}} err = b.Gather(&acc) require.NoError(t, err) - - for 
_, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "bcache", fields, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) diff --git a/plugins/system/disk_test.go b/plugins/system/disk_test.go index 6ad7b171b..6ea110fef 100644 --- a/plugins/system/disk_test.go +++ b/plugins/system/disk_test.go @@ -68,8 +68,8 @@ func TestDiskStats(t *testing.T) { "inodes_free": uint64(468), //tags2) "inodes_used": uint64(2000), //tags2) } - acc.AssertContainsFields(t, "disk", fields1, tags1) - acc.AssertContainsFields(t, "disk", fields2, tags2) + acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) + acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) // We expect 6 more DiskPoints to show up with an explicit match on "/" // and /home not matching the /dev in Mountpoints diff --git a/plugins/system/docker_test.go b/plugins/system/docker_test.go index 5bfcf986e..eb9cb41c7 100644 --- a/plugins/system/docker_test.go +++ b/plugins/system/docker_test.go @@ -116,5 +116,5 @@ func TestDockerStats_GenerateStats(t *testing.T) { "total_unevictable": uint64(27), } - acc.AssertContainsFields(t, "docker", fields, dockertags) + acc.AssertContainsTaggedFields(t, "docker", fields, dockertags) } diff --git a/plugins/system/memory_test.go b/plugins/system/memory_test.go index ca4a07bae..bf461e2e2 100644 --- a/plugins/system/memory_test.go +++ b/plugins/system/memory_test.go @@ -53,7 +53,7 @@ func TestMemStats(t *testing.T) { "cached": uint64(0), "buffered": uint64(0), } - acc.AssertContainsFields(t, "mem", memfields, nil) + acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) acc.Points = nil @@ -68,5 +68,5 @@ func TestMemStats(t *testing.T) { "in": uint64(7), "out": uint64(830), } - acc.AssertContainsFields(t, "swap", swapfields, nil) + acc.AssertContainsTaggedFields(t, "swap", swapfields, 
make(map[string]string)) } diff --git a/plugins/system/net_test.go b/plugins/system/net_test.go index ee6010580..ba71848d1 100644 --- a/plugins/system/net_test.go +++ b/plugins/system/net_test.go @@ -74,13 +74,13 @@ func TestNetStats(t *testing.T) { "drop_in": uint64(7), "drop_out": uint64(1), } - acc.AssertContainsFields(t, "net", fields1, ntags) + acc.AssertContainsTaggedFields(t, "net", fields1, ntags) fields2 := map[string]interface{}{ "udp_noports": int64(892592), "udp_indatagrams": int64(4655), } - acc.AssertContainsFields(t, "net", fields2, nil) + acc.AssertContainsTaggedFields(t, "net", fields2, make(map[string]string)) acc.Points = nil @@ -102,5 +102,5 @@ func TestNetStats(t *testing.T) { "tcp_none": 0, "udp_socket": 1, } - acc.AssertContainsFields(t, "netstat", fields3, nil) + acc.AssertContainsTaggedFields(t, "netstat", fields3, make(map[string]string)) } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 256c4e105..3262db169 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -121,15 +121,12 @@ func (a *Accumulator) NFields() int { return counter } -func (a *Accumulator) AssertContainsFields( +func (a *Accumulator) AssertContainsTaggedFields( t *testing.T, measurement string, fields map[string]interface{}, tags map[string]string, ) { - if tags == nil { - tags = make(map[string]string) - } for _, p := range a.Points { if !reflect.DeepEqual(tags, p.Tags) { continue @@ -148,6 +145,25 @@ func (a *Accumulator) AssertContainsFields( assert.Fail(t, msg) } +func (a *Accumulator) AssertContainsFields( + t *testing.T, + measurement string, + fields map[string]interface{}, +) { + for _, p := range a.Points { + if p.Measurement == measurement { + if !reflect.DeepEqual(fields, p.Fields) { + msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)", + p.Fields, p.Fields, fields, fields) + assert.Fail(t, msg) + } + return + } + } + msg := fmt.Sprintf("unknown measurement %s", measurement) + assert.Fail(t, msg) +} + // HasIntValue 
returns true if the measurement has an Int value func (a *Accumulator) HasIntField(measurement string, field string) bool { for _, p := range a.Points { From c4a7711e02f5b75ac9a209c26ffa3e5c396461a1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 5 Jan 2016 18:06:30 -0700 Subject: [PATCH 031/103] 0.3.0 unit tests: disque and elasticsearch --- plugins/disque/disque_test.go | 109 ++--- plugins/elasticsearch/elasticsearch_test.go | 62 +-- plugins/elasticsearch/testdata_test.go | 502 ++++++++++---------- 3 files changed, 309 insertions(+), 364 deletions(-) diff --git a/plugins/disque/disque_test.go b/plugins/disque/disque_test.go index 0a4722d93..91c7dc979 100644 --- a/plugins/disque/disque_test.go +++ b/plugins/disque/disque_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -55,42 +54,26 @@ func TestDisqueGeneratesMetrics(t *testing.T) { err = r.Gather(&acc) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 1452705}, - {"clients", 31}, - {"blocked_clients", 13}, - {"used_memory", 1840104}, - {"used_memory_rss", 3227648}, - {"used_memory_peak", 89603656}, - {"total_connections_received", 5062777}, - {"total_commands_processed", 12308396}, - {"instantaneous_ops_per_sec", 18}, - {"latest_fork_usec", 1644}, - {"registered_jobs", 360}, - {"registered_queues", 12}, - } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 1.75}, - {"used_cpu_sys", 19585.73}, - {"used_cpu_user", 11255.96}, - {"used_cpu_sys_children", 1.75}, - {"used_cpu_user_children", 1.91}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "uptime": uint64(1452705), + "clients": uint64(31), + "blocked_clients": uint64(13), + "used_memory": 
uint64(1840104), + "used_memory_rss": uint64(3227648), + "used_memory_peak": uint64(89603656), + "total_connections_received": uint64(5062777), + "total_commands_processed": uint64(12308396), + "instantaneous_ops_per_sec": uint64(18), + "latest_fork_usec": uint64(1644), + "registered_jobs": uint64(360), + "registered_queues": uint64(12), + "mem_fragmentation_ratio": float64(1.75), + "used_cpu_sys": float64(19585.73), + "used_cpu_user": float64(11255.96), + "used_cpu_sys_children": float64(1.75), + "used_cpu_user_children": float64(1.91), } + acc.AssertContainsFields(t, "disque", fields) } func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { @@ -137,42 +120,26 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { err = r.Gather(&acc) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 1452705}, - {"clients", 31}, - {"blocked_clients", 13}, - {"used_memory", 1840104}, - {"used_memory_rss", 3227648}, - {"used_memory_peak", 89603656}, - {"total_connections_received", 5062777}, - {"total_commands_processed", 12308396}, - {"instantaneous_ops_per_sec", 18}, - {"latest_fork_usec", 1644}, - {"registered_jobs", 360}, - {"registered_queues", 12}, - } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 1.75}, - {"used_cpu_sys", 19585.73}, - {"used_cpu_user", 11255.96}, - {"used_cpu_sys_children", 1.75}, - {"used_cpu_user_children", 1.91}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "uptime": uint64(1452705), + "clients": uint64(31), + "blocked_clients": uint64(13), + "used_memory": uint64(1840104), + "used_memory_rss": uint64(3227648), + "used_memory_peak": uint64(89603656), + "total_connections_received": uint64(5062777), + "total_commands_processed": uint64(12308396), + "instantaneous_ops_per_sec": 
uint64(18), + "latest_fork_usec": uint64(1644), + "registered_jobs": uint64(360), + "registered_queues": uint64(12), + "mem_fragmentation_ratio": float64(1.75), + "used_cpu_sys": float64(19585.73), + "used_cpu_user": float64(11255.96), + "used_cpu_sys_children": float64(1.75), + "used_cpu_user_children": float64(1.91), } + acc.AssertContainsFields(t, "disque", fields) } const testOutput = `# Server diff --git a/plugins/elasticsearch/elasticsearch_test.go b/plugins/elasticsearch/elasticsearch_test.go index c697593e9..62c3cb8fd 100644 --- a/plugins/elasticsearch/elasticsearch_test.go +++ b/plugins/elasticsearch/elasticsearch_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) @@ -52,23 +52,15 @@ func TestElasticsearch(t *testing.T) { "node_host": "test", } - testTables := []map[string]float64{ - indicesExpected, - osExpected, - processExpected, - jvmExpected, - threadPoolExpected, - fsExpected, - transportExpected, - httpExpected, - breakersExpected, - } - - for _, testTable := range testTables { - for k, v := range testTable { - assert.NoError(t, acc.ValidateTaggedValue(k, v, tags)) - } - } + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags) } func TestGatherClusterStats(t *testing.T) { 
@@ -80,29 +72,15 @@ func TestGatherClusterStats(t *testing.T) { var acc testutil.Accumulator require.NoError(t, es.Gather(&acc)) - var clusterHealthTests = []struct { - measurement string - fields map[string]interface{} - tags map[string]string - }{ - { - "cluster_health", - clusterHealthExpected, - map[string]string{"name": "elasticsearch_telegraf"}, - }, - { - "indices", - v1IndexExpected, - map[string]string{"index": "v1"}, - }, - { - "indices", - v2IndexExpected, - map[string]string{"index": "v2"}, - }, - } + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", + clusterHealthExpected, + map[string]string{"name": "elasticsearch_telegraf"}) - for _, exp := range clusterHealthTests { - assert.NoError(t, acc.ValidateTaggedFields(exp.measurement, exp.fields, exp.tags)) - } + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + v1IndexExpected, + map[string]string{"index": "v1"}) + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + v2IndexExpected, + map[string]string{"index": "v2"}) } diff --git a/plugins/elasticsearch/testdata_test.go b/plugins/elasticsearch/testdata_test.go index 7fc777553..03e512f81 100644 --- a/plugins/elasticsearch/testdata_test.go +++ b/plugins/elasticsearch/testdata_test.go @@ -489,271 +489,271 @@ const statsResponse = ` } ` -var indicesExpected = map[string]float64{ - "indices_id_cache_memory_size_in_bytes": 0, - "indices_completion_size_in_bytes": 0, - "indices_suggest_total": 0, - "indices_suggest_time_in_millis": 0, - "indices_suggest_current": 0, - "indices_query_cache_memory_size_in_bytes": 0, - "indices_query_cache_evictions": 0, - "indices_query_cache_hit_count": 0, - "indices_query_cache_miss_count": 0, - "indices_store_size_in_bytes": 37715234, - "indices_store_throttle_time_in_millis": 215, - "indices_merges_current_docs": 0, - "indices_merges_current_size_in_bytes": 0, - "indices_merges_total": 133, - "indices_merges_total_time_in_millis": 21060, - "indices_merges_total_docs": 203672, - 
"indices_merges_total_size_in_bytes": 142900226, - "indices_merges_current": 0, - "indices_filter_cache_memory_size_in_bytes": 7384, - "indices_filter_cache_evictions": 0, - "indices_indexing_index_total": 84790, - "indices_indexing_index_time_in_millis": 29680, - "indices_indexing_index_current": 0, - "indices_indexing_noop_update_total": 0, - "indices_indexing_throttle_time_in_millis": 0, - "indices_indexing_delete_total": 13879, - "indices_indexing_delete_time_in_millis": 1139, - "indices_indexing_delete_current": 0, - "indices_get_exists_time_in_millis": 0, - "indices_get_missing_total": 1, - "indices_get_missing_time_in_millis": 2, - "indices_get_current": 0, - "indices_get_total": 1, - "indices_get_time_in_millis": 2, - "indices_get_exists_total": 0, - "indices_refresh_total": 1076, - "indices_refresh_total_time_in_millis": 20078, - "indices_percolate_current": 0, - "indices_percolate_memory_size_in_bytes": -1, - "indices_percolate_queries": 0, - "indices_percolate_total": 0, - "indices_percolate_time_in_millis": 0, - "indices_translog_operations": 17702, - "indices_translog_size_in_bytes": 17, - "indices_recovery_current_as_source": 0, - "indices_recovery_current_as_target": 0, - "indices_recovery_throttle_time_in_millis": 0, - "indices_docs_count": 29652, - "indices_docs_deleted": 5229, - "indices_flush_total_time_in_millis": 2401, - "indices_flush_total": 115, - "indices_fielddata_memory_size_in_bytes": 12996, - "indices_fielddata_evictions": 0, - "indices_search_fetch_current": 0, - "indices_search_open_contexts": 0, - "indices_search_query_total": 1452, - "indices_search_query_time_in_millis": 5695, - "indices_search_query_current": 0, - "indices_search_fetch_total": 414, - "indices_search_fetch_time_in_millis": 146, - "indices_warmer_current": 0, - "indices_warmer_total": 2319, - "indices_warmer_total_time_in_millis": 448, - "indices_segments_count": 134, - "indices_segments_memory_in_bytes": 1285212, - "indices_segments_index_writer_memory_in_bytes": 
0, - "indices_segments_index_writer_max_memory_in_bytes": 172368955, - "indices_segments_version_map_memory_in_bytes": 611844, - "indices_segments_fixed_bit_set_memory_in_bytes": 0, +var indicesExpected = map[string]interface{}{ + "id_cache_memory_size_in_bytes": float64(0), + "completion_size_in_bytes": float64(0), + "suggest_total": float64(0), + "suggest_time_in_millis": float64(0), + "suggest_current": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_miss_count": float64(0), + "store_size_in_bytes": float64(37715234), + "store_throttle_time_in_millis": float64(215), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(133), + "merges_total_time_in_millis": float64(21060), + "merges_total_docs": float64(203672), + "merges_total_size_in_bytes": float64(142900226), + "merges_current": float64(0), + "filter_cache_memory_size_in_bytes": float64(7384), + "filter_cache_evictions": float64(0), + "indexing_index_total": float64(84790), + "indexing_index_time_in_millis": float64(29680), + "indexing_index_current": float64(0), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "indexing_delete_total": float64(13879), + "indexing_delete_time_in_millis": float64(1139), + "indexing_delete_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_missing_total": float64(1), + "get_missing_time_in_millis": float64(2), + "get_current": float64(0), + "get_total": float64(1), + "get_time_in_millis": float64(2), + "get_exists_total": float64(0), + "refresh_total": float64(1076), + "refresh_total_time_in_millis": float64(20078), + "percolate_current": float64(0), + "percolate_memory_size_in_bytes": float64(-1), + "percolate_queries": float64(0), + "percolate_total": float64(0), + "percolate_time_in_millis": float64(0), + "translog_operations": float64(17702), + 
"translog_size_in_bytes": float64(17), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "docs_count": float64(29652), + "docs_deleted": float64(5229), + "flush_total_time_in_millis": float64(2401), + "flush_total": float64(115), + "fielddata_memory_size_in_bytes": float64(12996), + "fielddata_evictions": float64(0), + "search_fetch_current": float64(0), + "search_open_contexts": float64(0), + "search_query_total": float64(1452), + "search_query_time_in_millis": float64(5695), + "search_query_current": float64(0), + "search_fetch_total": float64(414), + "search_fetch_time_in_millis": float64(146), + "warmer_current": float64(0), + "warmer_total": float64(2319), + "warmer_total_time_in_millis": float64(448), + "segments_count": float64(134), + "segments_memory_in_bytes": float64(1285212), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_index_writer_max_memory_in_bytes": float64(172368955), + "segments_version_map_memory_in_bytes": float64(611844), + "segments_fixed_bit_set_memory_in_bytes": float64(0), } -var osExpected = map[string]float64{ - "os_swap_used_in_bytes": 0, - "os_swap_free_in_bytes": 487997440, - "os_timestamp": 1436460392944, - "os_mem_free_percent": 74, - "os_mem_used_percent": 25, - "os_mem_actual_free_in_bytes": 1565470720, - "os_mem_actual_used_in_bytes": 534159360, - "os_mem_free_in_bytes": 477761536, - "os_mem_used_in_bytes": 1621868544, +var osExpected = map[string]interface{}{ + "swap_used_in_bytes": float64(0), + "swap_free_in_bytes": float64(487997440), + "timestamp": float64(1436460392944), + "mem_free_percent": float64(74), + "mem_used_percent": float64(25), + "mem_actual_free_in_bytes": float64(1565470720), + "mem_actual_used_in_bytes": float64(534159360), + "mem_free_in_bytes": float64(477761536), + "mem_used_in_bytes": float64(1621868544), } -var processExpected = map[string]float64{ - "process_mem_total_virtual_in_bytes": 
4747890688, - "process_timestamp": 1436460392945, - "process_open_file_descriptors": 160, - "process_cpu_total_in_millis": 15480, - "process_cpu_percent": 2, - "process_cpu_sys_in_millis": 1870, - "process_cpu_user_in_millis": 13610, +var processExpected = map[string]interface{}{ + "mem_total_virtual_in_bytes": float64(4747890688), + "timestamp": float64(1436460392945), + "open_file_descriptors": float64(160), + "cpu_total_in_millis": float64(15480), + "cpu_percent": float64(2), + "cpu_sys_in_millis": float64(1870), + "cpu_user_in_millis": float64(13610), } -var jvmExpected = map[string]float64{ - "jvm_timestamp": 1436460392945, - "jvm_uptime_in_millis": 202245, - "jvm_mem_non_heap_used_in_bytes": 39634576, - "jvm_mem_non_heap_committed_in_bytes": 40841216, - "jvm_mem_pools_young_max_in_bytes": 279183360, - "jvm_mem_pools_young_peak_used_in_bytes": 71630848, - "jvm_mem_pools_young_peak_max_in_bytes": 279183360, - "jvm_mem_pools_young_used_in_bytes": 32685760, - "jvm_mem_pools_survivor_peak_used_in_bytes": 8912888, - "jvm_mem_pools_survivor_peak_max_in_bytes": 34865152, - "jvm_mem_pools_survivor_used_in_bytes": 8912880, - "jvm_mem_pools_survivor_max_in_bytes": 34865152, - "jvm_mem_pools_old_peak_max_in_bytes": 724828160, - "jvm_mem_pools_old_used_in_bytes": 11110928, - "jvm_mem_pools_old_max_in_bytes": 724828160, - "jvm_mem_pools_old_peak_used_in_bytes": 14354608, - "jvm_mem_heap_used_in_bytes": 52709568, - "jvm_mem_heap_used_percent": 5, - "jvm_mem_heap_committed_in_bytes": 259522560, - "jvm_mem_heap_max_in_bytes": 1038876672, - "jvm_threads_peak_count": 45, - "jvm_threads_count": 44, - "jvm_gc_collectors_young_collection_count": 2, - "jvm_gc_collectors_young_collection_time_in_millis": 98, - "jvm_gc_collectors_old_collection_count": 1, - "jvm_gc_collectors_old_collection_time_in_millis": 24, - "jvm_buffer_pools_direct_count": 40, - "jvm_buffer_pools_direct_used_in_bytes": 6304239, - "jvm_buffer_pools_direct_total_capacity_in_bytes": 6304239, - 
"jvm_buffer_pools_mapped_count": 0, - "jvm_buffer_pools_mapped_used_in_bytes": 0, - "jvm_buffer_pools_mapped_total_capacity_in_bytes": 0, +var jvmExpected = map[string]interface{}{ + "timestamp": float64(1436460392945), + "uptime_in_millis": float64(202245), + "mem_non_heap_used_in_bytes": float64(39634576), + "mem_non_heap_committed_in_bytes": float64(40841216), + "mem_pools_young_max_in_bytes": float64(279183360), + "mem_pools_young_peak_used_in_bytes": float64(71630848), + "mem_pools_young_peak_max_in_bytes": float64(279183360), + "mem_pools_young_used_in_bytes": float64(32685760), + "mem_pools_survivor_peak_used_in_bytes": float64(8912888), + "mem_pools_survivor_peak_max_in_bytes": float64(34865152), + "mem_pools_survivor_used_in_bytes": float64(8912880), + "mem_pools_survivor_max_in_bytes": float64(34865152), + "mem_pools_old_peak_max_in_bytes": float64(724828160), + "mem_pools_old_used_in_bytes": float64(11110928), + "mem_pools_old_max_in_bytes": float64(724828160), + "mem_pools_old_peak_used_in_bytes": float64(14354608), + "mem_heap_used_in_bytes": float64(52709568), + "mem_heap_used_percent": float64(5), + "mem_heap_committed_in_bytes": float64(259522560), + "mem_heap_max_in_bytes": float64(1038876672), + "threads_peak_count": float64(45), + "threads_count": float64(44), + "gc_collectors_young_collection_count": float64(2), + "gc_collectors_young_collection_time_in_millis": float64(98), + "gc_collectors_old_collection_count": float64(1), + "gc_collectors_old_collection_time_in_millis": float64(24), + "buffer_pools_direct_count": float64(40), + "buffer_pools_direct_used_in_bytes": float64(6304239), + "buffer_pools_direct_total_capacity_in_bytes": float64(6304239), + "buffer_pools_mapped_count": float64(0), + "buffer_pools_mapped_used_in_bytes": float64(0), + "buffer_pools_mapped_total_capacity_in_bytes": float64(0), } -var threadPoolExpected = map[string]float64{ - "thread_pool_merge_threads": 6, - "thread_pool_merge_queue": 4, - "thread_pool_merge_active": 
5, - "thread_pool_merge_rejected": 2, - "thread_pool_merge_largest": 5, - "thread_pool_merge_completed": 1, - "thread_pool_bulk_threads": 4, - "thread_pool_bulk_queue": 5, - "thread_pool_bulk_active": 7, - "thread_pool_bulk_rejected": 3, - "thread_pool_bulk_largest": 1, - "thread_pool_bulk_completed": 4, - "thread_pool_warmer_threads": 2, - "thread_pool_warmer_queue": 7, - "thread_pool_warmer_active": 3, - "thread_pool_warmer_rejected": 2, - "thread_pool_warmer_largest": 3, - "thread_pool_warmer_completed": 1, - "thread_pool_get_largest": 2, - "thread_pool_get_completed": 1, - "thread_pool_get_threads": 1, - "thread_pool_get_queue": 8, - "thread_pool_get_active": 4, - "thread_pool_get_rejected": 3, - "thread_pool_index_threads": 6, - "thread_pool_index_queue": 8, - "thread_pool_index_active": 4, - "thread_pool_index_rejected": 2, - "thread_pool_index_largest": 3, - "thread_pool_index_completed": 6, - "thread_pool_suggest_threads": 2, - "thread_pool_suggest_queue": 7, - "thread_pool_suggest_active": 2, - "thread_pool_suggest_rejected": 1, - "thread_pool_suggest_largest": 8, - "thread_pool_suggest_completed": 3, - "thread_pool_fetch_shard_store_queue": 7, - "thread_pool_fetch_shard_store_active": 4, - "thread_pool_fetch_shard_store_rejected": 2, - "thread_pool_fetch_shard_store_largest": 4, - "thread_pool_fetch_shard_store_completed": 1, - "thread_pool_fetch_shard_store_threads": 1, - "thread_pool_management_threads": 2, - "thread_pool_management_queue": 3, - "thread_pool_management_active": 1, - "thread_pool_management_rejected": 6, - "thread_pool_management_largest": 2, - "thread_pool_management_completed": 22, - "thread_pool_percolate_queue": 23, - "thread_pool_percolate_active": 13, - "thread_pool_percolate_rejected": 235, - "thread_pool_percolate_largest": 23, - "thread_pool_percolate_completed": 33, - "thread_pool_percolate_threads": 123, - "thread_pool_listener_active": 4, - "thread_pool_listener_rejected": 8, - "thread_pool_listener_largest": 1, - 
"thread_pool_listener_completed": 1, - "thread_pool_listener_threads": 1, - "thread_pool_listener_queue": 2, - "thread_pool_search_rejected": 7, - "thread_pool_search_largest": 2, - "thread_pool_search_completed": 4, - "thread_pool_search_threads": 5, - "thread_pool_search_queue": 7, - "thread_pool_search_active": 2, - "thread_pool_fetch_shard_started_threads": 3, - "thread_pool_fetch_shard_started_queue": 1, - "thread_pool_fetch_shard_started_active": 5, - "thread_pool_fetch_shard_started_rejected": 6, - "thread_pool_fetch_shard_started_largest": 4, - "thread_pool_fetch_shard_started_completed": 54, - "thread_pool_refresh_rejected": 4, - "thread_pool_refresh_largest": 8, - "thread_pool_refresh_completed": 3, - "thread_pool_refresh_threads": 23, - "thread_pool_refresh_queue": 7, - "thread_pool_refresh_active": 3, - "thread_pool_optimize_threads": 3, - "thread_pool_optimize_queue": 4, - "thread_pool_optimize_active": 1, - "thread_pool_optimize_rejected": 2, - "thread_pool_optimize_largest": 7, - "thread_pool_optimize_completed": 3, - "thread_pool_snapshot_largest": 1, - "thread_pool_snapshot_completed": 0, - "thread_pool_snapshot_threads": 8, - "thread_pool_snapshot_queue": 5, - "thread_pool_snapshot_active": 6, - "thread_pool_snapshot_rejected": 2, - "thread_pool_generic_threads": 1, - "thread_pool_generic_queue": 4, - "thread_pool_generic_active": 6, - "thread_pool_generic_rejected": 3, - "thread_pool_generic_largest": 2, - "thread_pool_generic_completed": 27, - "thread_pool_flush_threads": 3, - "thread_pool_flush_queue": 8, - "thread_pool_flush_active": 0, - "thread_pool_flush_rejected": 1, - "thread_pool_flush_largest": 5, - "thread_pool_flush_completed": 3, +var threadPoolExpected = map[string]interface{}{ + "merge_threads": float64(6), + "merge_queue": float64(4), + "merge_active": float64(5), + "merge_rejected": float64(2), + "merge_largest": float64(5), + "merge_completed": float64(1), + "bulk_threads": float64(4), + "bulk_queue": float64(5), + 
"bulk_active": float64(7), + "bulk_rejected": float64(3), + "bulk_largest": float64(1), + "bulk_completed": float64(4), + "warmer_threads": float64(2), + "warmer_queue": float64(7), + "warmer_active": float64(3), + "warmer_rejected": float64(2), + "warmer_largest": float64(3), + "warmer_completed": float64(1), + "get_largest": float64(2), + "get_completed": float64(1), + "get_threads": float64(1), + "get_queue": float64(8), + "get_active": float64(4), + "get_rejected": float64(3), + "index_threads": float64(6), + "index_queue": float64(8), + "index_active": float64(4), + "index_rejected": float64(2), + "index_largest": float64(3), + "index_completed": float64(6), + "suggest_threads": float64(2), + "suggest_queue": float64(7), + "suggest_active": float64(2), + "suggest_rejected": float64(1), + "suggest_largest": float64(8), + "suggest_completed": float64(3), + "fetch_shard_store_queue": float64(7), + "fetch_shard_store_active": float64(4), + "fetch_shard_store_rejected": float64(2), + "fetch_shard_store_largest": float64(4), + "fetch_shard_store_completed": float64(1), + "fetch_shard_store_threads": float64(1), + "management_threads": float64(2), + "management_queue": float64(3), + "management_active": float64(1), + "management_rejected": float64(6), + "management_largest": float64(2), + "management_completed": float64(22), + "percolate_queue": float64(23), + "percolate_active": float64(13), + "percolate_rejected": float64(235), + "percolate_largest": float64(23), + "percolate_completed": float64(33), + "percolate_threads": float64(123), + "listener_active": float64(4), + "listener_rejected": float64(8), + "listener_largest": float64(1), + "listener_completed": float64(1), + "listener_threads": float64(1), + "listener_queue": float64(2), + "search_rejected": float64(7), + "search_largest": float64(2), + "search_completed": float64(4), + "search_threads": float64(5), + "search_queue": float64(7), + "search_active": float64(2), + "fetch_shard_started_threads": 
float64(3), + "fetch_shard_started_queue": float64(1), + "fetch_shard_started_active": float64(5), + "fetch_shard_started_rejected": float64(6), + "fetch_shard_started_largest": float64(4), + "fetch_shard_started_completed": float64(54), + "refresh_rejected": float64(4), + "refresh_largest": float64(8), + "refresh_completed": float64(3), + "refresh_threads": float64(23), + "refresh_queue": float64(7), + "refresh_active": float64(3), + "optimize_threads": float64(3), + "optimize_queue": float64(4), + "optimize_active": float64(1), + "optimize_rejected": float64(2), + "optimize_largest": float64(7), + "optimize_completed": float64(3), + "snapshot_largest": float64(1), + "snapshot_completed": float64(0), + "snapshot_threads": float64(8), + "snapshot_queue": float64(5), + "snapshot_active": float64(6), + "snapshot_rejected": float64(2), + "generic_threads": float64(1), + "generic_queue": float64(4), + "generic_active": float64(6), + "generic_rejected": float64(3), + "generic_largest": float64(2), + "generic_completed": float64(27), + "flush_threads": float64(3), + "flush_queue": float64(8), + "flush_active": float64(0), + "flush_rejected": float64(1), + "flush_largest": float64(5), + "flush_completed": float64(3), } -var fsExpected = map[string]float64{ - "fs_timestamp": 1436460392946, - "fs_total_free_in_bytes": 16909316096, - "fs_total_available_in_bytes": 15894814720, - "fs_total_total_in_bytes": 19507089408, +var fsExpected = map[string]interface{}{ + "timestamp": float64(1436460392946), + "total_free_in_bytes": float64(16909316096), + "total_available_in_bytes": float64(15894814720), + "total_total_in_bytes": float64(19507089408), } -var transportExpected = map[string]float64{ - "transport_server_open": 13, - "transport_rx_count": 6, - "transport_rx_size_in_bytes": 1380, - "transport_tx_count": 6, - "transport_tx_size_in_bytes": 1380, +var transportExpected = map[string]interface{}{ + "server_open": float64(13), + "rx_count": float64(6), + "rx_size_in_bytes": 
float64(1380), + "tx_count": float64(6), + "tx_size_in_bytes": float64(1380), } -var httpExpected = map[string]float64{ - "http_current_open": 3, - "http_total_opened": 3, +var httpExpected = map[string]interface{}{ + "current_open": float64(3), + "total_opened": float64(3), } -var breakersExpected = map[string]float64{ - "breakers_fielddata_estimated_size_in_bytes": 0, - "breakers_fielddata_overhead": 1.03, - "breakers_fielddata_tripped": 0, - "breakers_fielddata_limit_size_in_bytes": 623326003, - "breakers_request_estimated_size_in_bytes": 0, - "breakers_request_overhead": 1.0, - "breakers_request_tripped": 0, - "breakers_request_limit_size_in_bytes": 415550668, - "breakers_parent_overhead": 1.0, - "breakers_parent_tripped": 0, - "breakers_parent_limit_size_in_bytes": 727213670, - "breakers_parent_estimated_size_in_bytes": 0, +var breakersExpected = map[string]interface{}{ + "fielddata_estimated_size_in_bytes": float64(0), + "fielddata_overhead": float64(1.03), + "fielddata_tripped": float64(0), + "fielddata_limit_size_in_bytes": float64(623326003), + "request_estimated_size_in_bytes": float64(0), + "request_overhead": float64(1.0), + "request_tripped": float64(0), + "request_limit_size_in_bytes": float64(415550668), + "parent_overhead": float64(1.0), + "parent_tripped": float64(0), + "parent_limit_size_in_bytes": float64(727213670), + "parent_estimated_size_in_bytes": float64(0), } From 524fddedb489eb9c74c5fb6aacf3b0e51a63e415 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 16:11:16 -0700 Subject: [PATCH 032/103] 0.3.0 unit tests: exec, httpjson, and haproxy --- internal/internal.go | 2 +- plugins/aerospike/aerospike_test.go | 96 ++++++------- plugins/exec/exec_test.go | 206 +++------------------------- plugins/haproxy/haproxy.go | 2 +- plugins/haproxy/haproxy_test.go | 114 ++++++++------- plugins/httpjson/httpjson.go | 2 +- plugins/httpjson/httpjson_test.go | 145 ++++++++------------ testutil/accumulator.go | 31 +++-- 8 files changed, 221 
insertions(+), 377 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 93c467808..fc55ba529 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -51,7 +51,7 @@ func (f *JSONFlattener) FlattenJSON( } case float64: f.Fields[fieldname] = t - case bool, string, []interface{}: + case bool, string, []interface{}, nil: // ignored types return nil default: diff --git a/plugins/aerospike/aerospike_test.go b/plugins/aerospike/aerospike_test.go index 1345bdc91..3f4d909a2 100644 --- a/plugins/aerospike/aerospike_test.go +++ b/plugins/aerospike/aerospike_test.go @@ -1,10 +1,12 @@ package aerospike import ( + "reflect" + "testing" + "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestAerospikeStatistics(t *testing.T) { @@ -60,55 +62,57 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) { acc.AssertContainsTaggedFields(t, "aerospike", fields, tags) } -// func TestReadAerospikeStatsNamespace(t *testing.T) { -// var acc testutil.Accumulator -// stats := map[string]string{ -// "stat_write_errs": "12345", -// "stat_read_reqs": "12345", -// } -// readAerospikeStats(stats, &acc, "host1", "test") +func TestReadAerospikeStatsNamespace(t *testing.T) { + var acc testutil.Accumulator + stats := map[string]string{ + "stat_write_errs": "12345", + "stat_read_reqs": "12345", + } + readAerospikeStats(stats, &acc, "host1", "test") -// tags := map[string]string{ -// "aerospike_host": "host1", -// "namespace": "test", -// } -// for k := range stats { -// assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil) -// } -// } + fields := map[string]interface{}{ + "stat_write_errs": int64(12345), + "stat_read_reqs": int64(12345), + } + tags := map[string]string{ + "aerospike_host": "host1", + "namespace": "test", + } + acc.AssertContainsTaggedFields(t, "aerospike", fields, tags) +} -// func TestAerospikeUnmarshalList(t *testing.T) { -// i := map[string]string{ -// 
"test": "one;two;three", -// } +func TestAerospikeUnmarshalList(t *testing.T) { + i := map[string]string{ + "test": "one;two;three", + } -// expected := []string{"one", "two", "three"} + expected := []string{"one", "two", "three"} -// list, err := unmarshalListInfo(i, "test2") -// assert.True(t, err != nil) + list, err := unmarshalListInfo(i, "test2") + assert.True(t, err != nil) -// list, err = unmarshalListInfo(i, "test") -// assert.True(t, err == nil) -// equal := true -// for ix := range expected { -// if list[ix] != expected[ix] { -// equal = false -// break -// } -// } -// assert.True(t, equal) -// } + list, err = unmarshalListInfo(i, "test") + assert.True(t, err == nil) + equal := true + for ix := range expected { + if list[ix] != expected[ix] { + equal = false + break + } + } + assert.True(t, equal) +} -// func TestAerospikeUnmarshalMap(t *testing.T) { -// i := map[string]string{ -// "test": "key1=value1;key2=value2", -// } +func TestAerospikeUnmarshalMap(t *testing.T) { + i := map[string]string{ + "test": "key1=value1;key2=value2", + } -// expected := map[string]string{ -// "key1": "value1", -// "key2": "value2", -// } -// m, err := unmarshalMapInfo(i, "test") -// assert.True(t, err == nil) -// assert.True(t, reflect.DeepEqual(m, expected)) -// } + expected := map[string]string{ + "key1": "value1", + "key2": "value2", + } + m, err := unmarshalMapInfo(i, "test") + assert.True(t, err == nil) + assert.True(t, reflect.DeepEqual(m, expected)) +} diff --git a/plugins/exec/exec_test.go b/plugins/exec/exec_test.go index 3f0b6f4ce..bb94a2fd5 100644 --- a/plugins/exec/exec_test.go +++ b/plugins/exec/exec_test.go @@ -2,12 +2,11 @@ package exec import ( "fmt" + "testing" + "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math" - "testing" - "time" ) // Midnight 9/22/2015 @@ -37,10 +36,6 @@ type runnerMock struct { err error } -type clockMock struct { - now time.Time -} - func newRunnerMock(out 
[]byte, err error) Runner { return &runnerMock{ out: out, @@ -48,215 +43,56 @@ func newRunnerMock(out []byte, err error) Runner { } } -func (r runnerMock) Run(command *Command) ([]byte, error) { +func (r runnerMock) Run(e *Exec) ([]byte, error) { if r.err != nil { return nil, r.err } return r.out, nil } -func newClockMock(now time.Time) Clock { - return &clockMock{now: now} -} - -func (c clockMock) Now() time.Time { - return c.now -} - func TestExec(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, + runner: newRunnerMock([]byte(validJson), nil), + Command: "testcommand arg1", + Name: "mycollector", } var acc testutil.Accumulator - initialPoints := len(acc.Points) err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints require.NoError(t, err) + assert.Equal(t, acc.NFields(), 4, "non-numeric measurements should be ignored") - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, + fields := map[string]interface{}{ + "num_processes": float64(82), + "cpu_used": float64(8234), + "cpu_free": float64(32), + "percent": float64(0.81), } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored") + acc.AssertContainsFields(t, "exec_mycollector", fields) } func TestExecMalformed(t *testing.T) { - runner := newRunnerMock([]byte(malformedJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command := Command{ - Command: "badcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: 
time.Unix(baseTimeSeconds, 0), - } - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, + runner: newRunnerMock([]byte(malformedJson), nil), + Command: "badcommand arg1", + Name: "mycollector", } var acc testutil.Accumulator - initialPoints := len(acc.Points) err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints require.Error(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") + assert.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestCommandError(t *testing.T) { - runner := newRunnerMock(nil, fmt.Errorf("exit status code 1")) - clock := newClockMock(time.Unix(baseTimeSeconds+20, 0)) - command := Command{ - Command: "badcommand", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, + runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), + Command: "badcommand", + Name: "mycollector", } var acc testutil.Accumulator - initialPoints := len(acc.Points) err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints require.Error(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") -} - -func TestExecNotEnoughTime(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+5, 0)) - command := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - assert.Equal(t, deltaPoints, 0, "No new points should have been added") -} - -func TestExecUninitializedLastRunAt(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - 
clock := newClockMock(time.Unix(baseTimeSeconds, 0)) - command := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: math.MaxInt32, - // Uninitialized lastRunAt should default to time.Unix(0, 0), so this should - // run no matter what the interval is - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{&command}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, "non-numeric measurements should be ignored") -} -func TestExecOneNotEnoughTimeAndOneEnoughTime(t *testing.T) { - runner := newRunnerMock([]byte(validJson), nil) - clock := newClockMock(time.Unix(baseTimeSeconds+5, 0)) - notEnoughTimeCommand := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 10, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - enoughTimeCommand := Command{ - Command: "testcommand arg1", - Name: "mycollector", - Interval: 3, - lastRunAt: time.Unix(baseTimeSeconds, 0), - } - - e := &Exec{ - runner: runner, - clock: clock, - Commands: []*Command{¬EnoughTimeCommand, &enoughTimeCommand}, - } - - var acc testutil.Accumulator - initialPoints := len(acc.Points) - err := e.Gather(&acc) - deltaPoints := len(acc.Points) - initialPoints - require.NoError(t, err) - - checkFloat := []struct { - name string - value float64 - }{ - {"mycollector_num_processes", 82}, - {"mycollector_cpu_used", 8234}, - {"mycollector_cpu_free", 32}, - {"mycollector_percent", 0.81}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - assert.Equal(t, deltaPoints, 4, 
"Only one command should have been run") + assert.Equal(t, acc.NFields(), 0, "No new points should have been added") } diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index 2069af249..10f7dab3c 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -91,7 +91,7 @@ var sampleConfig = ` # If no servers are specified, then default to 127.0.0.1:1936 servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] # Or you can also use local socket(not work yet) - # servers = ["socket:/run/haproxy/admin.sock"] + # servers = ["socket://run/haproxy/admin.sock"] ` func (r *haproxy) SampleConfig() string { diff --git a/plugins/haproxy/haproxy_test.go b/plugins/haproxy/haproxy_test.go index 6f07d34d1..e514bc7ad 100644 --- a/plugins/haproxy/haproxy_test.go +++ b/plugins/haproxy/haproxy_test.go @@ -47,52 +47,39 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { "sv": "host0", } - assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags)) - - checkInt := []struct { - name string - value uint64 - }{ - - {"qmax", 81}, - {"scur", 288}, - {"smax", 713}, - {"bin", 5557055817}, - {"bout", 24096715169}, - {"dreq", 1102}, - {"dresp", 80}, - {"ereq", 95740}, - {"econ", 0}, - {"eresp", 0}, - {"wretr", 17}, - {"wredis", 19}, - {"active_servers", 1}, - {"backup_servers", 0}, - {"downtime", 0}, - {"throttle", 13}, - {"lbtot", 114}, - {"rate", 18}, - {"rate_max", 102}, - {"check_duration", 1}, - {"http_response.1xx", 0}, - {"http_response.2xx", 1314093}, - {"http_response.3xx", 537036}, - {"http_response.4xx", 123452}, - {"http_response.5xx", 11966}, - {"req_rate", 35}, - {"req_rate_max", 140}, - {"req_tot", 1987928}, - {"cli_abort", 0}, - {"srv_abort", 0}, - {"qtime", 0}, - {"ctime", 2}, - {"rtime", 23}, - {"ttime", 545}, - } - - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) + fields := map[string]interface{}{ + "active_servers": uint64(1), + "backup_servers": 
uint64(0), + "bin": uint64(510913516), + "bout": uint64(2193856571), + "check_duration": uint64(10), + "cli_abort": uint64(73), + "ctime": uint64(2), + "downtime": uint64(0), + "dresp": uint64(0), + "econ": uint64(0), + "eresp": uint64(1), + "http_response.1xx": uint64(0), + "http_response.2xx": uint64(119534), + "http_response.3xx": uint64(48051), + "http_response.4xx": uint64(2345), + "http_response.5xx": uint64(1056), + "lbtot": uint64(171013), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(0), + "rate": uint64(3), + "rate_max": uint64(12), + "rtime": uint64(312), + "scur": uint64(1), + "smax": uint64(32), + "srv_abort": uint64(1), + "stot": uint64(171014), + "ttime": uint64(2341), + "wredis": uint64(0), + "wretr": uint64(1), } + acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) //Here, we should get error because we don't pass authentication data r = &haproxy{ @@ -124,10 +111,39 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { "sv": "host0", } - assert.NoError(t, acc.ValidateTaggedValue("stot", uint64(171014), tags)) - assert.NoError(t, acc.ValidateTaggedValue("scur", uint64(1), tags)) - assert.NoError(t, acc.ValidateTaggedValue("rate", uint64(3), tags)) - assert.Equal(t, true, acc.CheckValue("bin", uint64(5557055817))) + fields := map[string]interface{}{ + "active_servers": uint64(1), + "backup_servers": uint64(0), + "bin": uint64(510913516), + "bout": uint64(2193856571), + "check_duration": uint64(10), + "cli_abort": uint64(73), + "ctime": uint64(2), + "downtime": uint64(0), + "dresp": uint64(0), + "econ": uint64(0), + "eresp": uint64(1), + "http_response.1xx": uint64(0), + "http_response.2xx": uint64(119534), + "http_response.3xx": uint64(48051), + "http_response.4xx": uint64(2345), + "http_response.5xx": uint64(1056), + "lbtot": uint64(171013), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(0), + "rate": uint64(3), + "rate_max": uint64(12), + "rtime": uint64(312), + "scur": uint64(1), + "smax": 
uint64(32), + "srv_abort": uint64(1), + "stot": uint64(171014), + "ttime": uint64(2341), + "wredis": uint64(0), + "wretr": uint64(1), + } + acc.AssertContainsTaggedFields(t, "haproxy", fields, tags) } //When not passing server config, we default to localhost diff --git a/plugins/httpjson/httpjson.go b/plugins/httpjson/httpjson.go index 40a771a91..9da0f63d0 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/httpjson/httpjson.go @@ -153,7 +153,7 @@ func (h *HttpJson) gatherServer( } else { msrmnt_name = "httpjson_" + h.Name } - acc.AddFields(msrmnt_name, f.Fields, nil) + acc.AddFields(msrmnt_name, f.Fields, tags) return nil } diff --git a/plugins/httpjson/httpjson_test.go b/plugins/httpjson/httpjson_test.go index 8f9bfe3ac..7e9ffd331 100644 --- a/plugins/httpjson/httpjson_test.go +++ b/plugins/httpjson/httpjson_test.go @@ -1,7 +1,6 @@ package httpjson import ( - "fmt" "io/ioutil" "net/http" "strings" @@ -35,6 +34,11 @@ const validJSONTags = ` "build": "123" }` +var expectedFields = map[string]interface{}{ + "parent_child": float64(3), + "integer": float64(4), +} + const invalidJSON = "I don't think this is JSON" const empty = "" @@ -76,37 +80,36 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { // // Returns: // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client -func genMockHttpJson(response string, statusCode int) *HttpJson { - return &HttpJson{ - client: mockHTTPClient{responseBody: response, statusCode: statusCode}, - Services: []Service{ - Service{ - Servers: []string{ - "http://server1.example.com/metrics/", - "http://server2.example.com/metrics/", - }, - Name: "my_webapp", - Method: "GET", - Parameters: map[string]string{ - "httpParam1": "12", - "httpParam2": "the second parameter", - }, +func genMockHttpJson(response string, statusCode int) []*HttpJson { + return []*HttpJson{ + &HttpJson{ + client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + Servers: []string{ + 
"http://server1.example.com/metrics/", + "http://server2.example.com/metrics/", }, - Service{ - Servers: []string{ - "http://server3.example.com/metrics/", - "http://server4.example.com/metrics/", - }, - Name: "other_webapp", - Method: "POST", - Parameters: map[string]string{ - "httpParam1": "12", - "httpParam2": "the second parameter", - }, - TagKeys: []string{ - "role", - "build", - }, + Name: "my_webapp", + Method: "GET", + Parameters: map[string]string{ + "httpParam1": "12", + "httpParam2": "the second parameter", + }, + }, + &HttpJson{ + client: mockHTTPClient{responseBody: response, statusCode: statusCode}, + Servers: []string{ + "http://server3.example.com/metrics/", + "http://server4.example.com/metrics/", + }, + Name: "other_webapp", + Method: "POST", + Parameters: map[string]string{ + "httpParam1": "12", + "httpParam2": "the second parameter", + }, + TagKeys: []string{ + "role", + "build", }, }, } @@ -116,28 +119,15 @@ func genMockHttpJson(response string, statusCode int) *HttpJson { func TestHttpJson200(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) - var acc testutil.Accumulator - err := httpjson.Gather(&acc) - require.NoError(t, err) - - assert.Equal(t, 8, len(acc.Points)) - - for _, service := range httpjson.Services { + for _, service := range httpjson { + var acc testutil.Accumulator + err := service.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_parent_child", service.Name), - 3.0, - map[string]string{"server": srv}, - ), - ) - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_integer", service.Name), - 4.0, - map[string]string{"server": srv}, - ), - ) + tags := map[string]string{"server": srv} + mname := "httpjson_" + service.Name + acc.AssertContainsTaggedFields(t, mname, expectedFields, tags) } } } @@ -147,28 +137,22 @@ func TestHttpJson500(t *testing.T) { httpjson := 
genMockHttpJson(validJSON, 500) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) - httpjson.Services[0].Method = "NOT_A_REAL_METHOD" + httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 2 error lines for (2 urls) * (1 falied service) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2) - - // (2 measurements) * (2 servers) * (1 successful service) - assert.Equal(t, 4, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -176,12 +160,10 @@ func TestHttpJsonBadJson(t *testing.T) { httpjson := genMockHttpJson(invalidJSON, 200) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test response to empty string as response objectgT @@ -189,34 +171,27 @@ func TestHttpJsonEmptyResponse(t *testing.T) { httpjson := genMockHttpJson(empty, 200) var acc testutil.Accumulator - err := httpjson.Gather(&acc) + err := httpjson[0].Gather(&acc) assert.NotNil(t, err) - // 4 error lines for (2 urls) * (2 services) - assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4) - assert.Equal(t, 0, len(acc.Points)) + assert.Equal(t, 0, acc.NFields()) } // Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { httpjson := genMockHttpJson(validJSONTags, 200) - var acc testutil.Accumulator - err := 
httpjson.Gather(&acc) - require.NoError(t, err) - - assert.Equal(t, 4, len(acc.Points)) - - for _, service := range httpjson.Services { + for _, service := range httpjson { if service.Name == "other_webapp" { + var acc testutil.Accumulator + err := service.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, 2, acc.NFields()) for _, srv := range service.Servers { - require.NoError(t, - acc.ValidateTaggedValue( - fmt.Sprintf("%s_value", service.Name), - 15.0, - map[string]string{"server": srv, "role": "master", "build": "123"}, - ), - ) + tags := map[string]string{"server": srv, "role": "master", "build": "123"} + fields := map[string]interface{}{"value": float64(15)} + mname := "httpjson_" + service.Name + acc.AssertContainsTaggedFields(t, mname, fields, tags) } } } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 3262db169..3800fef4a 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -1,6 +1,7 @@ package testutil import ( + "encoding/json" "fmt" "reflect" "sync" @@ -25,7 +26,9 @@ func (p *Point) String() string { // Accumulator defines a mocked out accumulator type Accumulator struct { sync.Mutex + Points []*Point + debug bool } // Add adds a measurement point to the accumulator @@ -59,6 +62,14 @@ func (a *Accumulator) AddFields( t = time.Now() } + if a.debug { + pretty, _ := json.MarshalIndent(fields, "", " ") + prettyTags, _ := json.MarshalIndent(tags, "", " ") + msg := fmt.Sprintf("Adding Measurement [%s]\nFields:%s\nTags:%s\n", + measurement, string(pretty), string(prettyTags)) + fmt.Print(msg) + } + p := &Point{ Measurement: measurement, Fields: fields, @@ -66,10 +77,7 @@ func (a *Accumulator) AddFields( Time: t, } - a.Points = append( - a.Points, - p, - ) + a.Points = append(a.Points, p) } func (a *Accumulator) SetDefaultTags(tags map[string]string) { @@ -91,11 +99,12 @@ func (a *Accumulator) SetPrefix(prefix string) { func (a *Accumulator) Debug() bool { // stub for implementing Accumulator interface. 
- return true + return a.debug } func (a *Accumulator) SetDebug(debug bool) { // stub for implementing Accumulator interface. + a.debug = debug } // Get gets the specified measurement point from the accumulator @@ -134,8 +143,10 @@ func (a *Accumulator) AssertContainsTaggedFields( if p.Measurement == measurement { if !reflect.DeepEqual(fields, p.Fields) { - msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)", - p.Fields, p.Fields, fields, fields) + pActual, _ := json.MarshalIndent(p.Fields, "", " ") + pExp, _ := json.MarshalIndent(fields, "", " ") + msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)", + string(pActual), p.Fields, string(pExp), fields) assert.Fail(t, msg) } return @@ -153,8 +164,10 @@ func (a *Accumulator) AssertContainsFields( for _, p := range a.Points { if p.Measurement == measurement { if !reflect.DeepEqual(fields, p.Fields) { - msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)", - p.Fields, p.Fields, fields, fields) + pActual, _ := json.MarshalIndent(p.Fields, "", " ") + pExp, _ := json.MarshalIndent(fields, "", " ") + msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)", + string(pActual), p.Fields, string(pExp), fields) assert.Fail(t, msg) } return From 9ada89d51a5353c850c7a665ed1dee1a0fbebcce Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 16:55:28 -0700 Subject: [PATCH 033/103] 0.3.0 unit tests: jolokia, kafka_consumer, leofs, lustre2 --- accumulator.go | 4 ++ plugins/jolokia/jolokia.go | 1 - plugins/jolokia/jolokia_test.go | 63 +++++------------- plugins/kafka_consumer/kafka_consumer_test.go | 3 +- plugins/leofs/leofs_test.go | 4 +- plugins/lustre2/lustre2.go | 29 +++++++-- plugins/lustre2/lustre2_test.go | 64 ++++++++----------- testutil/accumulator.go | 4 ++ 8 files changed, 75 insertions(+), 97 deletions(-) diff --git a/accumulator.go b/accumulator.go index 2defc8c7b..f14df63f7 100644 --- a/accumulator.go +++ b/accumulator.go @@ -69,6 +69,10 @@ func (ac *accumulator) AddFields( tags 
map[string]string, t ...time.Time, ) { + if len(fields) == 0 || len(measurement) == 0 { + return + } + if !ac.pluginConfig.Filter.ShouldTagsPass(tags) { return } diff --git a/plugins/jolokia/jolokia.go b/plugins/jolokia/jolokia.go index 8bebbc3c5..610f08cd5 100644 --- a/plugins/jolokia/jolokia.go +++ b/plugins/jolokia/jolokia.go @@ -75,7 +75,6 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { if err != nil { return nil, err } - defer req.Body.Close() resp, err := j.jClient.MakeRequest(req) if err != nil { diff --git a/plugins/jolokia/jolokia_test.go b/plugins/jolokia/jolokia_test.go index 95df76e7b..d29b8a810 100644 --- a/plugins/jolokia/jolokia_test.go +++ b/plugins/jolokia/jolokia_test.go @@ -48,7 +48,7 @@ const empty = "" var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"} -var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage", Pass: []string{"used"}} +var UsedHeapMetric = Metric{Name: "heap_memory_usage", Jmx: "/java.lang:type=Memory/HeapMemoryUsage"} type jolokiaClientStub struct { responseBody string @@ -79,7 +79,6 @@ func genJolokiaClientStub(response string, statusCode int, servers []Server, met // Test that the proper values are ignored or collected func TestHttpJsonMultiValue(t *testing.T) { - jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{HeapMetric}) var acc testutil.Accumulator @@ -88,58 +87,28 @@ func TestHttpJsonMultiValue(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, len(acc.Points)) - assert.True(t, acc.CheckFieldsValue("heap_memory_usage", map[string]interface{}{"init": 67108864.0, - "committed": 456130560.0, - "max": 477626368.0, - "used": 203288528.0})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonMultiValueWithPass(t *testing.T) { - - jolokia := 
genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - - assert.True(t, acc.CheckFieldsValue("heap_memory_usage", map[string]interface{}{"used": 203288528.0})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonMultiValueTags(t *testing.T) { - - jolokia := genJolokiaClientStub(validMultiValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - assert.NoError(t, acc.ValidateTaggedFieldsValue("heap_memory_usage", map[string]interface{}{"used": 203288528.0}, map[string]string{"host": "127.0.0.1", "port": "8080", "server": "as1"})) -} - -// Test that the proper values are ignored or collected -func TestHttpJsonSingleValueTags(t *testing.T) { - - jolokia := genJolokiaClientStub(validSingleValueJSON, 200, Servers, []Metric{UsedHeapMetric}) - - var acc testutil.Accumulator - err := jolokia.Gather(&acc) - - assert.Nil(t, err) - assert.Equal(t, 1, len(acc.Points)) - assert.NoError(t, acc.ValidateTaggedFieldsValue("heap_memory_usage", map[string]interface{}{"value": 209274376.0}, map[string]string{"host": "127.0.0.1", "port": "8080", "server": "as1"})) + fields := map[string]interface{}{ + "heap_memory_usage_init": 67108864.0, + "heap_memory_usage_committed": 456130560.0, + "heap_memory_usage_max": 477626368.0, + "heap_memory_usage_used": 203288528.0, + } + tags := map[string]string{ + "host": "127.0.0.1", + "port": "8080", + "server": "as1", + } + acc.AssertContainsTaggedFields(t, "jolokia", fields, tags) } // Test that the proper values are ignored or collected func TestHttpJsonOn404(t *testing.T) { - jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers, []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(validMultiValueJSON, 404, Servers, + 
[]Metric{UsedHeapMetric}) var acc testutil.Accumulator + acc.SetDebug(true) err := jolokia.Gather(&acc) assert.Nil(t, err) diff --git a/plugins/kafka_consumer/kafka_consumer_test.go b/plugins/kafka_consumer/kafka_consumer_test.go index eb0473361..dcd38f6c4 100644 --- a/plugins/kafka_consumer/kafka_consumer_test.go +++ b/plugins/kafka_consumer/kafka_consumer_test.go @@ -85,7 +85,8 @@ func TestRunParserAndGather(t *testing.T) { k.Gather(&acc) assert.Equal(t, len(acc.Points), 1) - assert.True(t, acc.CheckValue("cpu_load_short", 23422.0)) + acc.AssertContainsFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(23422)}) } func saramaMsg(val string) *sarama.ConsumerMessage { diff --git a/plugins/leofs/leofs_test.go b/plugins/leofs/leofs_test.go index 62a9f3fa3..48a82a18a 100644 --- a/plugins/leofs/leofs_test.go +++ b/plugins/leofs/leofs_test.go @@ -129,7 +129,6 @@ func buildFakeSNMPCmd(src string) { } func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { - // Build the fake snmpwalk for test src := makeFakeSNMPSrc(code) defer os.Remove(src) @@ -145,6 +144,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) } var acc testutil.Accumulator + acc.SetDebug(true) err := l.Gather(&acc) require.NoError(t, err) @@ -152,7 +152,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) floatMetrics := KeyMapping[serverType] for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatValue(metric), metric) + assert.True(t, acc.HasFloatField("leofs", metric), metric) } } diff --git a/plugins/lustre2/lustre2.go b/plugins/lustre2/lustre2.go index 29cd06acf..65f936966 100644 --- a/plugins/lustre2/lustre2.go +++ b/plugins/lustre2/lustre2.go @@ -22,6 +22,9 @@ import ( type Lustre2 struct { Ost_procfiles []string Mds_procfiles []string + + // allFields maps and OST name to the metric fields associated with that OST + allFields map[string]map[string]interface{} } var 
sampleConfig = ` @@ -140,8 +143,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, */ path := strings.Split(file, "/") name := path[len(path)-2] - tags := map[string]string{ - "name": name, + var fields map[string]interface{} + fields, ok := l.allFields[name] + if !ok { + fields = make(map[string]interface{}) + l.allFields[name] = fields } lines, err := internal.ReadLines(file) @@ -149,7 +155,6 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, return err } - fields := make(map[string]interface{}) for _, line := range lines { parts := strings.Fields(line) for _, wanted := range wanted_fields { @@ -173,7 +178,6 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, } } } - acc.AddFields("lustre2", fields, tags) } return nil } @@ -190,15 +194,18 @@ func (l *Lustre2) Description() string { // Gather reads stats from all lustre targets func (l *Lustre2) Gather(acc plugins.Accumulator) error { + l.allFields = make(map[string]map[string]interface{}) if len(l.Ost_procfiles) == 0 { // read/write bytes are in obdfilter//stats - err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wanted_ost_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", + wanted_ost_fields, acc) if err != nil { return err } // cache counters are in osd-ldiskfs//stats - err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wanted_ost_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", + wanted_ost_fields, acc) if err != nil { return err } @@ -206,7 +213,8 @@ func (l *Lustre2) Gather(acc plugins.Accumulator) error { if len(l.Mds_procfiles) == 0 { // Metadata server stats - err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wanted_mds_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", + wanted_mds_fields, acc) if err != nil { return err } @@ -225,6 +233,13 @@ func (l *Lustre2) Gather(acc 
plugins.Accumulator) error { } } + for name, fields := range l.allFields { + tags := map[string]string{ + "name": name, + } + acc.AddFields("lustre2", fields, tags) + } + return nil } diff --git a/plugins/lustre2/lustre2_test.go b/plugins/lustre2/lustre2_test.go index 850a4ff32..cea98fa1e 100644 --- a/plugins/lustre2/lustre2_test.go +++ b/plugins/lustre2/lustre2_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -58,11 +57,6 @@ samedir_rename 259625 samples [reqs] crossdir_rename 369571 samples [reqs] ` -type metrics struct { - name string - value uint64 -} - func TestLustre2GeneratesMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" @@ -103,41 +97,33 @@ func TestLustre2GeneratesMetrics(t *testing.T) { "name": ost_name, } - intMetrics := []*metrics{ - { - name: "write_bytes", - value: 15201500833981, - }, - { - name: "read_bytes", - value: 78026117632000, - }, - { - name: "write_calls", - value: 71893382, - }, - { - name: "read_calls", - value: 203238095, - }, - { - name: "cache_hit", - value: 7393729777, - }, - { - name: "cache_access", - value: 19047063027, - }, - { - name: "cache_miss", - value: 11653333250, - }, + fields := map[string]interface{}{ + "cache_access": uint64(19047063027), + "cache_hit": uint64(7393729777), + "cache_miss": uint64(11653333250), + "close": uint64(873243496), + "crossdir_rename": uint64(369571), + "getattr": uint64(1503663097), + "getxattr": uint64(6145349681), + "link": uint64(445), + "mkdir": uint64(705499), + "mknod": uint64(349042), + "open": uint64(1024577037), + "read_bytes": uint64(78026117632000), + "read_calls": uint64(203238095), + "rename": uint64(629196), + "rmdir": uint64(227434), + "samedir_rename": uint64(259625), + "setattr": uint64(1898364), + "setxattr": uint64(83969), + "statfs": uint64(2916320), + "sync": uint64(434081), + "unlink": uint64(3549417), + "write_bytes": 
uint64(15201500833981), + "write_calls": uint64(71893382), } - for _, metric := range intMetrics { - assert.True(t, acc.HasUIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "lustre2", fields, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 3800fef4a..7cdfb4155 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -55,6 +55,10 @@ func (a *Accumulator) AddFields( tags = map[string]string{} } + if len(fields) == 0 { + return + } + var t time.Time if len(timestamp) > 0 { t = timestamp[0] From 6a4bf9fcffbae1c849eaf3fee25c597a481af5b0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 17:19:39 -0700 Subject: [PATCH 034/103] 0.3.0 unit tests: mailchimp, memcached, mongodb --- plugins/mailchimp/mailchimp_test.go | 182 +++++++++------------------ plugins/memcached/memcached.go | 2 +- plugins/memcached/memcached_test.go | 2 +- plugins/mongodb/mongodb_data_test.go | 50 +++++--- 4 files changed, 100 insertions(+), 136 deletions(-) diff --git a/plugins/mailchimp/mailchimp_test.go b/plugins/mailchimp/mailchimp_test.go index bd800f656..5e5394581 100644 --- a/plugins/mailchimp/mailchimp_test.go +++ b/plugins/mailchimp/mailchimp_test.go @@ -9,7 +9,6 @@ import ( "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -42,67 +41,38 @@ func TestMailChimpGatherReports(t *testing.T) { tags["id"] = "42694e9e57" tags["campaign_title"] = "Freddie's Jokes Vol. 
1" - testInts := []struct { - measurement string - value int - }{ - {"emails_sent", 200}, - {"abuse_reports", 0}, - {"unsubscribed", 2}, - {"hard_bounces", 0}, - {"soft_bounces", 2}, - {"syntax_errors", 0}, - {"forwards_count", 0}, - {"forwards_opens", 0}, - {"opens_total", 186}, - {"unique_opens", 100}, - {"clicks_total", 42}, - {"unique_clicks", 400}, - {"unique_subscriber_clicks", 42}, - {"facebook_recipient_likes", 5}, - {"facebook_unique_likes", 8}, - {"facebook_likes", 42}, - } - for _, test := range testInts { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testFloats := []struct { - measurement string - value float64 - }{ - {"open_rate", 42}, - {"click_rate", 42}, - {"industry_open_rate", 0.17076777144396}, - {"industry_click_rate", 0.027431311866951}, - {"industry_bounce_rate", 0.0063767751251474}, - {"industry_unopen_rate", 0.82285545343089}, - {"industry_unsub_rate", 0.001436957032815}, - {"industry_abuse_rate", 0.00021111996110887}, - {"list_stats_sub_rate", 10}, - {"list_stats_unsub_rate", 20}, - {"list_stats_open_rate", 42}, - {"list_stats_click_rate", 42}, - } - for _, test := range testFloats { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testStrings := []struct { - measurement string - value string - }{ - {"industry_type", "Social Networks and Online Communities"}, - } - for _, test := range testStrings { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) + fields := map[string]interface{}{ + "emails_sent": int(200), + "abuse_reports": int(0), + "unsubscribed": int(2), + "hard_bounces": int(0), + "soft_bounces": int(2), + "syntax_errors": int(0), + 
"forwards_count": int(0), + "forwards_opens": int(0), + "opens_total": int(186), + "unique_opens": int(100), + "clicks_total": int(42), + "unique_clicks": int(400), + "unique_subscriber_clicks": int(42), + "facebook_recipient_likes": int(5), + "facebook_unique_likes": int(8), + "facebook_likes": int(42), + "open_rate": float64(42), + "click_rate": float64(42), + "industry_open_rate": float64(0.17076777144396), + "industry_click_rate": float64(0.027431311866951), + "industry_bounce_rate": float64(0.0063767751251474), + "industry_unopen_rate": float64(0.82285545343089), + "industry_unsub_rate": float64(0.001436957032815), + "industry_abuse_rate": float64(0.00021111996110887), + "list_stats_sub_rate": float64(10), + "list_stats_unsub_rate": float64(20), + "list_stats_open_rate": float64(42), + "list_stats_click_rate": float64(42), + "industry_type": "Social Networks and Online Communities", } + acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) } func TestMailChimpGatherReport(t *testing.T) { @@ -135,67 +105,39 @@ func TestMailChimpGatherReport(t *testing.T) { tags["id"] = "42694e9e57" tags["campaign_title"] = "Freddie's Jokes Vol. 
1" - testInts := []struct { - measurement string - value int - }{ - {"emails_sent", 200}, - {"abuse_reports", 0}, - {"unsubscribed", 2}, - {"hard_bounces", 0}, - {"soft_bounces", 2}, - {"syntax_errors", 0}, - {"forwards_count", 0}, - {"forwards_opens", 0}, - {"opens_total", 186}, - {"unique_opens", 100}, - {"clicks_total", 42}, - {"unique_clicks", 400}, - {"unique_subscriber_clicks", 42}, - {"facebook_recipient_likes", 5}, - {"facebook_unique_likes", 8}, - {"facebook_likes", 42}, - } - for _, test := range testInts { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) + fields := map[string]interface{}{ + "emails_sent": int(200), + "abuse_reports": int(0), + "unsubscribed": int(2), + "hard_bounces": int(0), + "soft_bounces": int(2), + "syntax_errors": int(0), + "forwards_count": int(0), + "forwards_opens": int(0), + "opens_total": int(186), + "unique_opens": int(100), + "clicks_total": int(42), + "unique_clicks": int(400), + "unique_subscriber_clicks": int(42), + "facebook_recipient_likes": int(5), + "facebook_unique_likes": int(8), + "facebook_likes": int(42), + "open_rate": float64(42), + "click_rate": float64(42), + "industry_open_rate": float64(0.17076777144396), + "industry_click_rate": float64(0.027431311866951), + "industry_bounce_rate": float64(0.0063767751251474), + "industry_unopen_rate": float64(0.82285545343089), + "industry_unsub_rate": float64(0.001436957032815), + "industry_abuse_rate": float64(0.00021111996110887), + "list_stats_sub_rate": float64(10), + "list_stats_unsub_rate": float64(20), + "list_stats_open_rate": float64(42), + "list_stats_click_rate": float64(42), + "industry_type": "Social Networks and Online Communities", } + acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) - testFloats := []struct { - measurement string - value float64 - }{ - {"open_rate", 42}, - {"click_rate", 42}, - 
{"industry_open_rate", 0.17076777144396}, - {"industry_click_rate", 0.027431311866951}, - {"industry_bounce_rate", 0.0063767751251474}, - {"industry_unopen_rate", 0.82285545343089}, - {"industry_unsub_rate", 0.001436957032815}, - {"industry_abuse_rate", 0.00021111996110887}, - {"list_stats_sub_rate", 10}, - {"list_stats_unsub_rate", 20}, - {"list_stats_open_rate", 42}, - {"list_stats_click_rate", 42}, - } - for _, test := range testFloats { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } - - testStrings := []struct { - measurement string - value string - }{ - {"industry_type", "Social Networks and Online Communities"}, - } - for _, test := range testStrings { - assert.True(t, acc.CheckTaggedValue(test.measurement, test.value, tags), - fmt.Sprintf("Measurement: %v, value: %v, tags: %v not found", - test.measurement, test.value, tags)) - } } func TestMailChimpGatherError(t *testing.T) { diff --git a/plugins/memcached/memcached.go b/plugins/memcached/memcached.go index 5aba8c9fb..196478264 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/memcached/memcached.go @@ -141,7 +141,7 @@ func (m *Memcached) gatherServer( for _, key := range sendMetrics { if value, ok := values[key]; ok { // Mostly it is the number - if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse != nil { + if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse == nil { fields[key] = iValue } else { fields[key] = value diff --git a/plugins/memcached/memcached_test.go b/plugins/memcached/memcached_test.go index 05ff669b3..6e2f8452a 100644 --- a/plugins/memcached/memcached_test.go +++ b/plugins/memcached/memcached_test.go @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + 
assert.True(t, acc.HasIntField("memcached", metric), metric) } } diff --git a/plugins/mongodb/mongodb_data_test.go b/plugins/mongodb/mongodb_data_test.go index 9ee3f9f48..5d24a7a09 100644 --- a/plugins/mongodb/mongodb_data_test.go +++ b/plugins/mongodb/mongodb_data_test.go @@ -6,7 +6,6 @@ import ( "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var tags = make(map[string]string) @@ -37,10 +36,11 @@ func TestAddNonReplStats(t *testing.T) { ) var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range DefaultStats { - assert.True(t, acc.HasIntValue(key)) + assert.True(t, acc.HasIntField("mongodb", key)) } } @@ -57,10 +57,11 @@ func TestAddReplStats(t *testing.T) { var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range MmapStats { - assert.True(t, acc.HasIntValue(key)) + assert.True(t, acc.HasIntField("mongodb", key)) } } @@ -76,10 +77,11 @@ func TestAddWiredTigerStats(t *testing.T) { var acc testutil.Accumulator - d.AddDefaultStats(&acc) + d.AddDefaultStats() + d.flush(&acc) for key, _ := range WiredTigerStats { - assert.True(t, acc.HasFloatValue(key)) + assert.True(t, acc.HasFloatField("mongodb", key)) } } @@ -95,17 +97,37 @@ func TestStateTag(t *testing.T) { tags, ) - stats := []string{"inserts_per_sec", "queries_per_sec"} - stateTags := make(map[string]string) stateTags["state"] = "PRI" var acc testutil.Accumulator - d.AddDefaultStats(&acc) - - for _, key := range stats { - err := acc.ValidateTaggedValue(key, int64(0), stateTags) - require.NoError(t, err) + d.AddDefaultStats() + d.flush(&acc) + fields := map[string]interface{}{ + "active_reads": int64(0), + "active_writes": int64(0), + "commands_per_sec": int64(0), + "deletes_per_sec": int64(0), + "flushes_per_sec": int64(0), + "getmores_per_sec": int64(0), + "inserts_per_sec": int64(0), + "member_status": "PRI", + "net_in_bytes": 
int64(0), + "net_out_bytes": int64(0), + "open_connections": int64(0), + "queries_per_sec": int64(0), + "queued_reads": int64(0), + "queued_writes": int64(0), + "repl_commands_per_sec": int64(0), + "repl_deletes_per_sec": int64(0), + "repl_getmores_per_sec": int64(0), + "repl_inserts_per_sec": int64(0), + "repl_queries_per_sec": int64(0), + "repl_updates_per_sec": int64(0), + "resident_megabytes": int64(0), + "updates_per_sec": int64(0), + "vsize_megabytes": int64(0), } + acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } From 6eb49dee5d6989f7ebf5f0af2da244dec2d9b954 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 17:37:56 -0700 Subject: [PATCH 035/103] 0.3.0 unit tests: mysql, nginx, phpfpm, ping, postgres --- plugins/mysql/mysql_test.go | 61 +-------------------------- plugins/nginx/nginx_test.go | 24 ++++------- plugins/phpfpm/phpfpm_test.go | 30 ++++++------- plugins/ping/ping_test.go | 40 ++++++++++-------- plugins/postgresql/postgresql_test.go | 46 +++++++------------- 5 files changed, 59 insertions(+), 142 deletions(-) diff --git a/plugins/mysql/mysql_test.go b/plugins/mysql/mysql_test.go index d424f284b..2362002bc 100644 --- a/plugins/mysql/mysql_test.go +++ b/plugins/mysql/mysql_test.go @@ -2,7 +2,6 @@ package mysql import ( "fmt" - "strings" "testing" "github.com/influxdb/telegraf/testutil" @@ -10,64 +9,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestMysqlGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - m := &Mysql{ - Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())}, - } - - var acc testutil.Accumulator - - err := m.Gather(&acc) - require.NoError(t, err) - - prefixes := []struct { - prefix string - count int - }{ - {"commands", 139}, - {"handler", 16}, - {"bytes", 2}, - {"innodb", 46}, - {"threads", 4}, - {"aborted", 2}, - {"created", 3}, - {"key", 7}, - {"open", 7}, - {"opened", 3}, - {"qcache", 8}, - {"table", 
1}, - } - - intMetrics := []string{ - "queries", - "slow_queries", - "connections", - } - - for _, prefix := range prefixes { - var count int - - for _, p := range acc.Points { - if strings.HasPrefix(p.Measurement, prefix.prefix) { - count++ - } - } - - if prefix.count > count { - t.Errorf("Expected less than %d measurements with prefix %s, got %d", - count, prefix.prefix, prefix.count) - } - } - - for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric)) - } -} - func TestMysqlDefaultsToLocal(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -82,7 +23,7 @@ func TestMysqlDefaultsToLocal(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, len(acc.Points) > 0) + assert.True(t, acc.HasMeasurement("mysql")) } func TestMysqlParseDSN(t *testing.T) { diff --git a/plugins/nginx/nginx_test.go b/plugins/nginx/nginx_test.go index 74dd37d31..9d694bc26 100644 --- a/plugins/nginx/nginx_test.go +++ b/plugins/nginx/nginx_test.go @@ -54,17 +54,14 @@ func TestNginxGeneratesMetrics(t *testing.T) { err := n.Gather(&acc) require.NoError(t, err) - metrics := []struct { - name string - value uint64 - }{ - {"active", 585}, - {"accepts", 85340}, - {"handled", 85340}, - {"requests", 35085}, - {"reading", 4}, - {"writing", 135}, - {"waiting", 446}, + fields := map[string]interface{}{ + "active": uint64(585), + "accepts": uint64(85340), + "handled": uint64(85340), + "requests": uint64(35085), + "reading": uint64(4), + "writing": uint64(135), + "waiting": uint64(446), } addr, err := url.Parse(ts.URL) if err != nil { @@ -84,8 +81,5 @@ func TestNginxGeneratesMetrics(t *testing.T) { } tags := map[string]string{"server": host, "port": port} - - for _, m := range metrics { - assert.NoError(t, acc.ValidateTaggedValue(m.name, m.value, tags)) - } + acc.AssertContainsTaggedFields(t, "nginx", fields, tags) } diff --git a/plugins/phpfpm/phpfpm_test.go b/plugins/phpfpm/phpfpm_test.go index 8fa192806..2f34372bf 100644 
--- a/plugins/phpfpm/phpfpm_test.go +++ b/plugins/phpfpm/phpfpm_test.go @@ -32,27 +32,21 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) { "url": ts.Listener.Addr().String(), "pool": "www", } - assert.NoError(t, acc.ValidateTaggedValue("accepted_conn", int64(3), tags)) - checkInt := []struct { - name string - value int64 - }{ - {"accepted_conn", 3}, - {"listen_queue", 1}, - {"max_listen_queue", 0}, - {"listen_queue_len", 0}, - {"idle_processes", 1}, - {"active_processes", 1}, - {"total_processes", 2}, - {"max_active_processes", 1}, - {"max_children_reached", 2}, - {"slow_requests", 1}, + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), } - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) - } + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) } //When not passing server config, we default to localhost diff --git a/plugins/ping/ping_test.go b/plugins/ping/ping_test.go index 5fed0b6c8..7ae86534d 100644 --- a/plugins/ping/ping_test.go +++ b/plugins/ping/ping_test.go @@ -120,18 +120,16 @@ func TestPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", - 43.628, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 5, + "packets_received": 5, + "percent_packet_loss": 0.0, + "average_response_ms": 43.628, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) tags = 
map[string]string{"url": "www.reddit.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", - 43.628, tags)) + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } var lossyPingOutput = ` @@ -159,10 +157,13 @@ func TestLossyPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.google.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 3, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 40.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 44.033, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 5, + "packets_received": 3, + "percent_packet_loss": 40.0, + "average_response_ms": 44.033, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } var errorPingOutput = ` @@ -188,10 +189,13 @@ func TestBadPingGather(t *testing.T) { p.Gather(&acc) tags := map[string]string{"url": "www.amazon.com"} - assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 2, tags)) - assert.NoError(t, acc.ValidateTaggedValue("packets_received", 0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 100.0, tags)) - assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 0.0, tags)) + fields := map[string]interface{}{ + "packets_transmitted": 2, + "packets_received": 0, + "percent_packet_loss": 100.0, + "average_response_ms": 0.0, + } + acc.AssertContainsTaggedFields(t, "ping", fields, tags) } func mockFatalHostPinger(args ...string) (string, error) { diff --git a/plugins/postgresql/postgresql_test.go b/plugins/postgresql/postgresql_test.go index 4a53a2e8f..0f4ff5579 100644 --- 
a/plugins/postgresql/postgresql_test.go +++ b/plugins/postgresql/postgresql_test.go @@ -15,13 +15,9 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - Databases: []string{"postgres"}, - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"postgres"}, } var acc testutil.Accumulator @@ -30,7 +26,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { require.NoError(t, err) availableColumns := make(map[string]bool) - for _, col := range p.Servers[0].OrderedColumns { + for _, col := range p.OrderedColumns { availableColumns[col] = true } @@ -61,7 +57,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, metric := range intMetrics { _, ok := availableColumns[metric] if ok { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("postgresql", metric)) metricsCounted++ } } @@ -69,7 +65,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { for _, metric := range floatMetrics { _, ok := availableColumns[metric] if ok { - assert.True(t, acc.HasFloatValue(metric)) + assert.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } } @@ -84,13 +80,9 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - Databases: []string{"postgres"}, - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + Databases: []string{"postgres"}, } var acc testutil.Accumulator @@ -98,7 +90,7 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { err := p.Gather(&acc) require.NoError(t, err) - point, ok := acc.Get("xact_commit") + point, ok := acc.Get("postgresql") require.True(t, ok) assert.Equal(t, "postgres", point.Tags["db"]) @@ -110,12 
+102,8 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), } var acc testutil.Accumulator @@ -126,7 +114,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { var found bool for _, pnt := range acc.Points { - if pnt.Measurement == "xact_commit" { + if pnt.Measurement == "postgresql" { if pnt.Tags["db"] == "postgres" { found = true break @@ -143,12 +131,8 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { } p := &Postgresql{ - Servers: []*Server{ - { - Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", - testutil.GetLocalHost()), - }, - }, + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), } var acc testutil.Accumulator From ccbd7bb78569b850a74576c5cdd8566c2603ae2f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 17:56:30 -0700 Subject: [PATCH 036/103] 0.3.0 unit tests: procstat, prometheus, puppetagent --- CHANGELOG.md | 2 + plugins/procstat/procstat_test.go | 8 +-- plugins/prometheus/prometheus.go | 3 +- plugins/prometheus/prometheus_test.go | 6 +- plugins/puppetagent/puppetagent_test.go | 86 +++++++++---------------- 5 files changed, 41 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 703a66ad3..c6d438319 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ - **breaking change** Plugin measurements aggregated into a single measurement. - **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration. +- **breaking change** `procstat` plugin has `*cpu*` fields renamed to +`*cpu_time*` - `twemproxy` plugin: `prefix` option removed. 
- `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_` diff --git a/plugins/procstat/procstat_test.go b/plugins/procstat/procstat_test.go index e0d17ac4d..6ec6834ca 100644 --- a/plugins/procstat/procstat_test.go +++ b/plugins/procstat/procstat_test.go @@ -20,11 +20,11 @@ func TestGather(t *testing.T) { file.Write([]byte(strconv.Itoa(pid))) file.Close() defer os.Remove(file.Name()) - specifications := []*Specification{&Specification{PidFile: file.Name(), Prefix: "foo"}} p := Procstat{ - Specifications: specifications, + PidFile: file.Name(), + Prefix: "foo", } p.Gather(&acc) - assert.True(t, acc.HasFloatValue("foo_cpu_user")) - assert.True(t, acc.HasUIntValue("foo_memory_vms")) + assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user")) + assert.True(t, acc.HasUIntField("procstat", "foo_memory_vms")) } diff --git a/plugins/prometheus/prometheus.go b/plugins/prometheus/prometheus.go index 7775b97b6..2742393c7 100644 --- a/plugins/prometheus/prometheus.go +++ b/plugins/prometheus/prometheus.go @@ -77,7 +77,8 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { if err == io.EOF { break } else if err != nil { - return fmt.Errorf("error getting processing samples for %s: %s", url, err) + return fmt.Errorf("error getting processing samples for %s: %s", + url, err) } for _, sample := range samples { tags := make(map[string]string) diff --git a/plugins/prometheus/prometheus_test.go b/plugins/prometheus/prometheus_test.go index 4f79822c1..901fe2da2 100644 --- a/plugins/prometheus/prometheus_test.go +++ b/plugins/prometheus/prometheus_test.go @@ -45,11 +45,11 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { value float64 tags map[string]string }{ - {"go_gc_duration_seconds_count", 7, map[string]string{}}, - {"go_goroutines", 15, map[string]string{}}, + {"prometheus_go_gc_duration_seconds_count", 7, map[string]string{}}, + {"prometheus_go_goroutines", 15, map[string]string{}}, } for _, e := range 
expected { - assert.NoError(t, acc.ValidateValue(e.name, e.value)) + assert.True(t, acc.HasFloatField(e.name, "value")) } } diff --git a/plugins/puppetagent/puppetagent_test.go b/plugins/puppetagent/puppetagent_test.go index 4d6a4c5f4..1d854ab46 100644 --- a/plugins/puppetagent/puppetagent_test.go +++ b/plugins/puppetagent/puppetagent_test.go @@ -2,7 +2,6 @@ package puppetagent import ( "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "testing" ) @@ -14,61 +13,36 @@ func TestGather(t *testing.T) { } pa.Gather(&acc) - checkInt := []struct { - name string - value int64 - }{ - {"events_failure", 0}, - {"events_total", 0}, - {"events_success", 0}, - {"resources_failed", 0}, - {"resources_scheduled", 0}, - {"resources_changed", 0}, - {"resources_skipped", 0}, - {"resources_total", 109}, - {"resources_failedtorestart", 0}, - {"resources_restarted", 0}, - {"resources_outofsync", 0}, - {"changes_total", 0}, - {"time_lastrun", 1444936531}, - {"version_config", 1444936521}, - } - - for _, c := range checkInt { - assert.Equal(t, true, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"time_user", 0.004331}, - {"time_schedule", 0.001123}, - {"time_filebucket", 0.000353}, - {"time_file", 0.441472}, - {"time_exec", 0.508123}, - {"time_anchor", 0.000555}, - {"time_sshauthorizedkey", 0.000764}, - {"time_service", 1.807795}, - {"time_package", 1.325788}, - {"time_total", 8.85354707064819}, - {"time_configretrieval", 4.75567007064819}, - {"time_cron", 0.000584}, - } - - for _, f := range checkFloat { - assert.Equal(t, true, acc.CheckValue(f.name, f.value)) - } - - checkString := []struct { - name string - value string - }{ - {"version_puppet", "3.7.5"}, - } - - for _, s := range checkString { - assert.Equal(t, true, acc.CheckValue(s.name, s.value)) + tags := map[string]string{"location": "last_run_summary.yaml"} + fields := map[string]interface{}{ + "events_failure": int64(0), + "events_total": 
int64(0), + "events_success": int64(0), + "resources_failed": int64(0), + "resources_scheduled": int64(0), + "resources_changed": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "resources_failedtorestart": int64(0), + "resources_restarted": int64(0), + "resources_outofsync": int64(0), + "changes_total": int64(0), + "time_lastrun": int64(1444936531), + "version_config": int64(1444936521), + "time_user": float64(0.004331), + "time_schedule": float64(0.001123), + "time_filebucket": float64(0.000353), + "time_file": float64(0.441472), + "time_exec": float64(0.508123), + "time_anchor": float64(0.000555), + "time_sshauthorizedkey": float64(0.000764), + "time_service": float64(1.807795), + "time_package": float64(1.325788), + "time_total": float64(8.85354707064819), + "time_configretrieval": float64(4.75567007064819), + "time_cron": float64(0.000584), + "version_puppet": "3.7.5", } + acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) } From c01594c2a4fc37c1cd20733b1f013ece80f3b8cf Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 18:13:00 -0700 Subject: [PATCH 037/103] 0.3.0 unit tests: rabbitmq, redis --- plugins/rabbitmq/rabbitmq_test.go | 12 ++-- plugins/redis/redis_test.go | 94 +++++++++++++------------------ 2 files changed, 44 insertions(+), 62 deletions(-) diff --git a/plugins/rabbitmq/rabbitmq_test.go b/plugins/rabbitmq/rabbitmq_test.go index 38bfb7a7d..12b7aee70 100644 --- a/plugins/rabbitmq/rabbitmq_test.go +++ b/plugins/rabbitmq/rabbitmq_test.go @@ -394,11 +394,7 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { defer ts.Close() r := &RabbitMQ{ - Servers: []*Server{ - { - URL: ts.URL, - }, - }, + URL: ts.URL, } var acc testutil.Accumulator @@ -423,7 +419,7 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rabbitmq_overview", metric)) } nodeIntMetrics := []string{ @@ -441,8 +437,8 @@ 
func TestRabbitMQGeneratesMetrics(t *testing.T) { } for _, metric := range nodeIntMetrics { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rabbitmq_node", metric)) } - assert.True(t, acc.HasMeasurement("queue")) + assert.True(t, acc.HasMeasurement("rabbitmq_queue")) } diff --git a/plugins/redis/redis_test.go b/plugins/redis/redis_test.go index ff52e8c57..ec0cf998c 100644 --- a/plugins/redis/redis_test.go +++ b/plugins/redis/redis_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -36,61 +35,48 @@ func TestRedis_ParseMetrics(t *testing.T) { err := gatherInfoOutput(rdr, &acc, tags) require.NoError(t, err) - checkInt := []struct { - name string - value uint64 - }{ - {"uptime", 238}, - {"clients", 1}, - {"used_memory", 1003936}, - {"used_memory_rss", 811008}, - {"used_memory_peak", 1003936}, - {"used_memory_lua", 33792}, - {"rdb_changes_since_last_save", 0}, - {"total_connections_received", 2}, - {"total_commands_processed", 1}, - {"instantaneous_ops_per_sec", 0}, - {"sync_full", 0}, - {"sync_partial_ok", 0}, - {"sync_partial_err", 0}, - {"expired_keys", 0}, - {"evicted_keys", 0}, - {"keyspace_hits", 1}, - {"keyspace_misses", 1}, - {"pubsub_channels", 0}, - {"pubsub_patterns", 0}, - {"latest_fork_usec", 0}, - {"connected_slaves", 0}, - {"master_repl_offset", 0}, - {"repl_backlog_active", 0}, - {"repl_backlog_size", 1048576}, - {"repl_backlog_histlen", 0}, - {"keys", 2}, - {"expires", 0}, - {"avg_ttl", 0}, + fields := map[string]interface{}{ + "uptime": uint64(238), + "clients": uint64(1), + "used_memory": uint64(1003936), + "used_memory_rss": uint64(811008), + "used_memory_peak": uint64(1003936), + "used_memory_lua": uint64(33792), + "rdb_changes_since_last_save": uint64(0), + "total_connections_received": uint64(2), + "total_commands_processed": uint64(1), + "instantaneous_ops_per_sec": uint64(0), + "sync_full": uint64(0), + 
"sync_partial_ok": uint64(0), + "sync_partial_err": uint64(0), + "expired_keys": uint64(0), + "evicted_keys": uint64(0), + "keyspace_hits": uint64(1), + "keyspace_misses": uint64(1), + "pubsub_channels": uint64(0), + "pubsub_patterns": uint64(0), + "latest_fork_usec": uint64(0), + "connected_slaves": uint64(0), + "master_repl_offset": uint64(0), + "repl_backlog_active": uint64(0), + "repl_backlog_size": uint64(1048576), + "repl_backlog_histlen": uint64(0), + "mem_fragmentation_ratio": float64(0.81), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), } - - for _, c := range checkInt { - assert.True(t, acc.CheckValue(c.name, c.value)) - } - - checkFloat := []struct { - name string - value float64 - }{ - {"mem_fragmentation_ratio", 0.81}, - {"instantaneous_input_kbps", 876.16}, - {"instantaneous_output_kbps", 3010.23}, - {"used_cpu_sys", 0.14}, - {"used_cpu_user", 0.05}, - {"used_cpu_sys_children", 0.00}, - {"used_cpu_user_children", 0.00}, - {"keyspace_hitrate", 0.50}, - } - - for _, c := range checkFloat { - assert.True(t, acc.CheckValue(c.name, c.value)) + keyspaceFields := map[string]interface{}{ + "avg_ttl": uint64(0), + "expires": uint64(0), + "keys": uint64(2), } + acc.AssertContainsTaggedFields(t, "redis", fields, tags) + acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, tags) } const testOutput = `# Server From 10f19fade140a92496fd188f4f3b689bea0df4e5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 18:19:18 -0700 Subject: [PATCH 038/103] 0.3.0 unit tests: statsd, trig, zookeeper --- plugins/statsd/statsd_test.go | 10 ++++------ plugins/trig/trig_test.go | 5 +---- plugins/zookeeper/zookeeper_test.go | 2 +- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git 
a/plugins/statsd/statsd_test.go b/plugins/statsd/statsd_test.go index 6c85b0c4b..4a97728f2 100644 --- a/plugins/statsd/statsd_test.go +++ b/plugins/statsd/statsd_test.go @@ -647,7 +647,7 @@ func TestParse_Counters(t *testing.T) { func TestParse_Timings(t *testing.T) { s := NewStatsd() s.Percentiles = []int{90} - testacc := &testutil.Accumulator{} + acc := &testutil.Accumulator{} // Test that counters work valid_lines := []string{ @@ -665,7 +665,7 @@ func TestParse_Timings(t *testing.T) { } } - s.Gather(testacc) + s.Gather(acc) tests := []struct { name string @@ -698,10 +698,8 @@ func TestParse_Timings(t *testing.T) { } for _, test := range tests { - if !testacc.CheckValue(test.name, test.value) { - t.Errorf("Did not find measurement %s with value %v", - test.name, test.value) - } + acc.AssertContainsFields(t, test.name, + map[string]interface{}{"value": test.value}) } } diff --git a/plugins/trig/trig_test.go b/plugins/trig/trig_test.go index 24218fe11..82605b0a5 100644 --- a/plugins/trig/trig_test.go +++ b/plugins/trig/trig_test.go @@ -5,8 +5,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - // "github.com/stretchr/testify/require" ) func TestTrig(t *testing.T) { @@ -27,7 +25,6 @@ func TestTrig(t *testing.T) { fields["sine"] = sine fields["cosine"] = cosine - assert.True(t, acc.CheckFieldsValue("trig", fields)) - + acc.AssertContainsFields(t, "trig", fields) } } diff --git a/plugins/zookeeper/zookeeper_test.go b/plugins/zookeeper/zookeeper_test.go index 075ca521d..354382ecc 100644 --- a/plugins/zookeeper/zookeeper_test.go +++ b/plugins/zookeeper/zookeeper_test.go @@ -38,6 +38,6 @@ func TestZookeeperGeneratesMetrics(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric), metric) + assert.True(t, acc.HasIntField("zookeeper", metric), metric) } } From 936c5a8a7a7055a50f8c8ed2304c7f1c877b4a20 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 6 Jan 2016 22:16:04 -0700 
Subject: [PATCH 039/103] 0.3.0 unit tests: rethinkdb, twemproxy, zfs --- plugins/rethinkdb/rethinkdb_data_test.go | 6 +- plugins/twemproxy/twemproxy.go | 2 +- plugins/twemproxy/twemproxy_test.go | 91 +++- plugins/zfs/zfs_test.go | 625 +++++------------------ 4 files changed, 205 insertions(+), 519 deletions(-) diff --git a/plugins/rethinkdb/rethinkdb_data_test.go b/plugins/rethinkdb/rethinkdb_data_test.go index 4c76b2340..3441370a3 100644 --- a/plugins/rethinkdb/rethinkdb_data_test.go +++ b/plugins/rethinkdb/rethinkdb_data_test.go @@ -36,7 +36,7 @@ func TestAddEngineStats(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rethinkdb_engine", metric)) } } @@ -67,7 +67,7 @@ func TestAddEngineStatsPartial(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range missing_keys { - assert.False(t, acc.HasIntValue(metric)) + assert.False(t, acc.HasIntField("rethinkdb", metric)) } } @@ -107,6 +107,6 @@ func TestAddStorageStats(t *testing.T) { storage.AddStats(&acc, tags) for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + assert.True(t, acc.HasIntField("rethinkdb", metric)) } } diff --git a/plugins/twemproxy/twemproxy.go b/plugins/twemproxy/twemproxy.go index fe3fb6de5..268e465da 100644 --- a/plugins/twemproxy/twemproxy.go +++ b/plugins/twemproxy/twemproxy.go @@ -130,7 +130,7 @@ func (t *Twemproxy) processServer( } } } - acc.AddFields("twemproxy_pool", fields, tags) + acc.AddFields("twemproxy_pool_server", fields, tags) } // Tags is not expected to be mutated after passing to Add. 
diff --git a/plugins/twemproxy/twemproxy_test.go b/plugins/twemproxy/twemproxy_test.go index c941cc197..60209d1a1 100644 --- a/plugins/twemproxy/twemproxy_test.go +++ b/plugins/twemproxy/twemproxy_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -85,16 +84,13 @@ func TestGather(t *testing.T) { defer mockServer.Close() twemproxy := &Twemproxy{ - Instances: []TwemproxyInstance{ - TwemproxyInstance{ - Addr: sampleAddr, - Pools: []string{"demo"}, - }, - }, + Addr: sampleAddr, + Pools: []string{"demo"}, } var acc testutil.Accumulator - err = twemproxy.Instances[0].Gather(&acc) + acc.SetDebug(true) + err = twemproxy.Gather(&acc) require.NoError(t, err) var sourceData map[string]interface{} @@ -102,33 +98,74 @@ func TestGather(t *testing.T) { panic(err) } - metrics := []string{"total_connections", "curr_connections", "timestamp"} + fields := map[string]interface{}{ + "total_connections": float64(276448), + "curr_connections": float64(1322), + "timestamp": float64(1.447312436e+09), + } tags := map[string]string{ "twemproxy": sampleAddr, "source": sourceData["source"].(string), } - for _, m := range metrics { - assert.NoError(t, acc.ValidateTaggedValue(m, sourceData[m].(float64), tags)) - } + acc.AssertContainsTaggedFields(t, "twemproxy", fields, tags) poolName := "demo" - poolMetrics := []string{ - "client_connections", "forward_error", "client_err", "server_ejects", - "fragments", "client_eof", + poolFields := map[string]interface{}{ + "client_connections": float64(1305), + "client_eof": float64(126813), + "client_err": float64(147942), + "forward_error": float64(11684), + "fragments": float64(0), + "server_ejects": float64(0), } tags["pool"] = poolName - poolData := sourceData[poolName].(map[string]interface{}) - for _, m := range poolMetrics { - measurement := poolName + "_" + m - assert.NoError(t, acc.ValidateTaggedValue(measurement, 
poolData[m].(float64), tags)) + acc.AssertContainsTaggedFields(t, "twemproxy_pool", poolFields, tags) + + poolServerTags1 := map[string]string{ + "pool": "demo", + "server": "10.16.29.2:6379", + "source": "server1.website.com", + "twemproxy": "127.0.0.1:22222", } - poolServers := []string{"10.16.29.1:6379", "10.16.29.2:6379"} - for _, s := range poolServers { - tags["server"] = s - serverData := poolData[s].(map[string]interface{}) - for k, v := range serverData { - measurement := poolName + "_" + k - assert.NoError(t, acc.ValidateTaggedValue(measurement, v, tags)) - } + poolServerFields1 := map[string]interface{}{ + "in_queue": float64(0), + "in_queue_bytes": float64(0), + "out_queue": float64(0), + "out_queue_bytes": float64(0), + "request_bytes": float64(2.412114759e+09), + "requests": float64(3.7870211e+07), + "response_bytes": float64(5.228980582e+09), + "responses": float64(3.7869551e+07), + "server_connections": float64(1), + "server_ejected_at": float64(0), + "server_eof": float64(0), + "server_err": float64(0), + "server_timedout": float64(25), } + acc.AssertContainsTaggedFields(t, "twemproxy_pool_server", + poolServerFields1, poolServerTags1) + + poolServerTags2 := map[string]string{ + "pool": "demo", + "server": "10.16.29.1:6379", + "source": "server1.website.com", + "twemproxy": "127.0.0.1:22222", + } + poolServerFields2 := map[string]interface{}{ + "in_queue": float64(0), + "in_queue_bytes": float64(0), + "out_queue": float64(0), + "out_queue_bytes": float64(0), + "request_bytes": float64(2.7758404e+09), + "requests": float64(4.3604566e+07), + "response_bytes": float64(7.663182096e+09), + "responses": float64(4.36039e+07), + "server_connections": float64(1), + "server_ejected_at": float64(0), + "server_eof": float64(0), + "server_err": float64(0), + "server_timedout": float64(24), + } + acc.AssertContainsTaggedFields(t, "twemproxy_pool_server", + poolServerFields2, poolServerTags2) } diff --git a/plugins/zfs/zfs_test.go b/plugins/zfs/zfs_test.go index 
c81e4889a..9530084d0 100644 --- a/plugins/zfs/zfs_test.go +++ b/plugins/zfs/zfs_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -121,17 +120,12 @@ hits 4 0 misses 4 0 ` const pool_ioContents = `11 3 0x00 1 80 2225326830828 32953476980628 -nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt -1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0 +nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt +1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0 ` var testKstatPath = os.TempDir() + "/telegraf/proc/spl/kstat/zfs" -type metrics struct { - name string - value int64 -} - func TestZfsPoolMetrics(t *testing.T) { err := os.MkdirAll(testKstatPath, 0755) require.NoError(t, err) @@ -149,29 +143,24 @@ func TestZfsPoolMetrics(t *testing.T) { var acc testutil.Accumulator - //one pool, all metrics - tags := map[string]string{ - "pool": "HOME", - } - z := &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}} err = z.Gather(&acc) require.NoError(t, err) - for _, metric := range poolMetrics { - assert.True(t, !acc.HasIntValue(metric.name), metric.name) - assert.True(t, !acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + require.False(t, acc.HasMeasurement("zfs_pool")) + acc.Points = nil z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true} err = z.Gather(&acc) require.NoError(t, err) - for _, metric := range poolMetrics { - assert.True(t, acc.HasIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) + //one pool, all metrics + tags := map[string]string{ + "pool": "HOME", } + acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) + err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, 
err) } @@ -208,10 +197,8 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc.Points = nil //two pools, all metrics err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) @@ -229,10 +216,8 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) + acc.Points = nil intMetrics = getKstatMetricsArcOnly() @@ -242,476 +227,140 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric.name), metric.name) - assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags)) - } + acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) } -func getKstatMetricsArcOnly() []*metrics { - return []*metrics{ - { - name: "arcstats_hits", - value: 5968846374, - }, - { - name: "arcstats_misses", - value: 1659178751, - }, - { - name: "arcstats_demand_data_hits", - value: 4860247322, - }, - { - name: "arcstats_demand_data_misses", - value: 501499535, - }, - { - name: "arcstats_demand_metadata_hits", - value: 708608325, - }, - { - name: "arcstats_demand_metadata_misses", - value: 156591375, - }, - { - name: "arcstats_prefetch_data_hits", - value: 367047144, - }, - { - name: "arcstats_prefetch_data_misses", - value: 974529898, - }, - { - name: "arcstats_prefetch_metadata_hits", - value: 32943583, - }, - { - name: "arcstats_prefetch_metadata_misses", - value: 
26557943, - }, - { - name: "arcstats_mru_hits", - value: 301176811, - }, - { - name: "arcstats_mru_ghost_hits", - value: 47066067, - }, - { - name: "arcstats_mfu_hits", - value: 5520612438, - }, - { - name: "arcstats_mfu_ghost_hits", - value: 45784009, - }, - { - name: "arcstats_deleted", - value: 1718937704, - }, - { - name: "arcstats_recycle_miss", - value: 481222994, - }, - { - name: "arcstats_mutex_miss", - value: 20575623, - }, - { - name: "arcstats_evict_skip", - value: 14655903906543, - }, - { - name: "arcstats_evict_l2_cached", - value: 145310202998272, - }, - { - name: "arcstats_evict_l2_eligible", - value: 16345402777088, - }, - { - name: "arcstats_evict_l2_ineligible", - value: 7437226893312, - }, - { - name: "arcstats_hash_elements", - value: 36617980, - }, - { - name: "arcstats_hash_elements_max", - value: 36618318, - }, - { - name: "arcstats_hash_collisions", - value: 554145157, - }, - { - name: "arcstats_hash_chains", - value: 4187651, - }, - { - name: "arcstats_hash_chain_max", - value: 26, - }, - { - name: "arcstats_p", - value: 13963222064, - }, - { - name: "arcstats_c", - value: 16381258376, - }, - { - name: "arcstats_c_min", - value: 4194304, - }, - { - name: "arcstats_c_max", - value: 16884125696, - }, - { - name: "arcstats_size", - value: 16319887096, - }, - { - name: "arcstats_hdr_size", - value: 42567864, - }, - { - name: "arcstats_data_size", - value: 60066304, - }, - { - name: "arcstats_meta_size", - value: 1701534208, - }, - { - name: "arcstats_other_size", - value: 1661543168, - }, - { - name: "arcstats_anon_size", - value: 94720, - }, - { - name: "arcstats_anon_evict_data", - value: 0, - }, - { - name: "arcstats_anon_evict_metadata", - value: 0, - }, - { - name: "arcstats_mru_size", - value: 973099008, - }, - { - name: "arcstats_mru_evict_data", - value: 9175040, - }, - { - name: "arcstats_mru_evict_metadata", - value: 32768, - }, - { - name: "arcstats_mru_ghost_size", - value: 32768, - }, - { - name: "arcstats_mru_ghost_evict_data", - 
value: 0, - }, - { - name: "arcstats_mru_ghost_evict_metadata", - value: 32768, - }, - { - name: "arcstats_mfu_size", - value: 788406784, - }, - { - name: "arcstats_mfu_evict_data", - value: 50881024, - }, - { - name: "arcstats_mfu_evict_metadata", - value: 81920, - }, - { - name: "arcstats_mfu_ghost_size", - value: 0, - }, - { - name: "arcstats_mfu_ghost_evict_data", - value: 0, - }, - { - name: "arcstats_mfu_ghost_evict_metadata", - value: 0, - }, - { - name: "arcstats_l2_hits", - value: 573868618, - }, - { - name: "arcstats_l2_misses", - value: 1085309718, - }, - { - name: "arcstats_l2_feeds", - value: 12182087, - }, - { - name: "arcstats_l2_rw_clash", - value: 9610, - }, - { - name: "arcstats_l2_read_bytes", - value: 32695938336768, - }, - { - name: "arcstats_l2_write_bytes", - value: 2826774778880, - }, - { - name: "arcstats_l2_writes_sent", - value: 4267687, - }, - { - name: "arcstats_l2_writes_done", - value: 4267687, - }, - { - name: "arcstats_l2_writes_error", - value: 0, - }, - { - name: "arcstats_l2_writes_hdr_miss", - value: 164, - }, - { - name: "arcstats_l2_evict_lock_retry", - value: 5, - }, - { - name: "arcstats_l2_evict_reading", - value: 0, - }, - { - name: "arcstats_l2_free_on_write", - value: 1606914, - }, - { - name: "arcstats_l2_cdata_free_on_write", - value: 1775, - }, - { - name: "arcstats_l2_abort_lowmem", - value: 83462, - }, - { - name: "arcstats_l2_cksum_bad", - value: 393860640, - }, - { - name: "arcstats_l2_io_error", - value: 53881460, - }, - { - name: "arcstats_l2_size", - value: 2471466648576, - }, - { - name: "arcstats_l2_asize", - value: 2461690072064, - }, - { - name: "arcstats_l2_hdr_size", - value: 12854175552, - }, - { - name: "arcstats_l2_compress_successes", - value: 12184849, - }, - { - name: "arcstats_l2_compress_zeros", - value: 0, - }, - { - name: "arcstats_l2_compress_failures", - value: 0, - }, - { - name: "arcstats_memory_throttle_count", - value: 0, - }, - { - name: "arcstats_duplicate_buffers", - value: 0, - }, - { 
- name: "arcstats_duplicate_buffers_size", - value: 0, - }, - { - name: "arcstats_duplicate_reads", - value: 0, - }, - { - name: "arcstats_memory_direct_count", - value: 5159942, - }, - { - name: "arcstats_memory_indirect_count", - value: 3034640, - }, - { - name: "arcstats_arc_no_grow", - value: 0, - }, - { - name: "arcstats_arc_tempreserve", - value: 0, - }, - { - name: "arcstats_arc_loaned_bytes", - value: 0, - }, - { - name: "arcstats_arc_prune", - value: 114554259559, - }, - { - name: "arcstats_arc_meta_used", - value: 16259820792, - }, - { - name: "arcstats_arc_meta_limit", - value: 12663094272, - }, - { - name: "arcstats_arc_meta_max", - value: 18327165696, - }, +func getKstatMetricsArcOnly() map[string]interface{} { + return map[string]interface{}{ + "arcstats_hits": int64(5968846374), + "arcstats_misses": int64(1659178751), + "arcstats_demand_data_hits": int64(4860247322), + "arcstats_demand_data_misses": int64(501499535), + "arcstats_demand_metadata_hits": int64(708608325), + "arcstats_demand_metadata_misses": int64(156591375), + "arcstats_prefetch_data_hits": int64(367047144), + "arcstats_prefetch_data_misses": int64(974529898), + "arcstats_prefetch_metadata_hits": int64(32943583), + "arcstats_prefetch_metadata_misses": int64(26557943), + "arcstats_mru_hits": int64(301176811), + "arcstats_mru_ghost_hits": int64(47066067), + "arcstats_mfu_hits": int64(5520612438), + "arcstats_mfu_ghost_hits": int64(45784009), + "arcstats_deleted": int64(1718937704), + "arcstats_recycle_miss": int64(481222994), + "arcstats_mutex_miss": int64(20575623), + "arcstats_evict_skip": int64(14655903906543), + "arcstats_evict_l2_cached": int64(145310202998272), + "arcstats_evict_l2_eligible": int64(16345402777088), + "arcstats_evict_l2_ineligible": int64(7437226893312), + "arcstats_hash_elements": int64(36617980), + "arcstats_hash_elements_max": int64(36618318), + "arcstats_hash_collisions": int64(554145157), + "arcstats_hash_chains": int64(4187651), + "arcstats_hash_chain_max": 
int64(26), + "arcstats_p": int64(13963222064), + "arcstats_c": int64(16381258376), + "arcstats_c_min": int64(4194304), + "arcstats_c_max": int64(16884125696), + "arcstats_size": int64(16319887096), + "arcstats_hdr_size": int64(42567864), + "arcstats_data_size": int64(60066304), + "arcstats_meta_size": int64(1701534208), + "arcstats_other_size": int64(1661543168), + "arcstats_anon_size": int64(94720), + "arcstats_anon_evict_data": int64(0), + "arcstats_anon_evict_metadata": int64(0), + "arcstats_mru_size": int64(973099008), + "arcstats_mru_evict_data": int64(9175040), + "arcstats_mru_evict_metadata": int64(32768), + "arcstats_mru_ghost_size": int64(32768), + "arcstats_mru_ghost_evict_data": int64(0), + "arcstats_mru_ghost_evict_metadata": int64(32768), + "arcstats_mfu_size": int64(788406784), + "arcstats_mfu_evict_data": int64(50881024), + "arcstats_mfu_evict_metadata": int64(81920), + "arcstats_mfu_ghost_size": int64(0), + "arcstats_mfu_ghost_evict_data": int64(0), + "arcstats_mfu_ghost_evict_metadata": int64(0), + "arcstats_l2_hits": int64(573868618), + "arcstats_l2_misses": int64(1085309718), + "arcstats_l2_feeds": int64(12182087), + "arcstats_l2_rw_clash": int64(9610), + "arcstats_l2_read_bytes": int64(32695938336768), + "arcstats_l2_write_bytes": int64(2826774778880), + "arcstats_l2_writes_sent": int64(4267687), + "arcstats_l2_writes_done": int64(4267687), + "arcstats_l2_writes_error": int64(0), + "arcstats_l2_writes_hdr_miss": int64(164), + "arcstats_l2_evict_lock_retry": int64(5), + "arcstats_l2_evict_reading": int64(0), + "arcstats_l2_free_on_write": int64(1606914), + "arcstats_l2_cdata_free_on_write": int64(1775), + "arcstats_l2_abort_lowmem": int64(83462), + "arcstats_l2_cksum_bad": int64(393860640), + "arcstats_l2_io_error": int64(53881460), + "arcstats_l2_size": int64(2471466648576), + "arcstats_l2_asize": int64(2461690072064), + "arcstats_l2_hdr_size": int64(12854175552), + "arcstats_l2_compress_successes": int64(12184849), + 
"arcstats_l2_compress_zeros": int64(0), + "arcstats_l2_compress_failures": int64(0), + "arcstats_memory_throttle_count": int64(0), + "arcstats_duplicate_buffers": int64(0), + "arcstats_duplicate_buffers_size": int64(0), + "arcstats_duplicate_reads": int64(0), + "arcstats_memory_direct_count": int64(5159942), + "arcstats_memory_indirect_count": int64(3034640), + "arcstats_arc_no_grow": int64(0), + "arcstats_arc_tempreserve": int64(0), + "arcstats_arc_loaned_bytes": int64(0), + "arcstats_arc_prune": int64(114554259559), + "arcstats_arc_meta_used": int64(16259820792), + "arcstats_arc_meta_limit": int64(12663094272), + "arcstats_arc_meta_max": int64(18327165696), } } -func getKstatMetricsAll() []*metrics { - otherMetrics := []*metrics{ - { - name: "zfetchstats_hits", - value: 7812959060, - }, - { - name: "zfetchstats_misses", - value: 4154484207, - }, - { - name: "zfetchstats_colinear_hits", - value: 1366368, - }, - { - name: "zfetchstats_colinear_misses", - value: 4153117839, - }, - { - name: "zfetchstats_stride_hits", - value: 7309776732, - }, - { - name: "zfetchstats_stride_misses", - value: 222766182, - }, - { - name: "zfetchstats_reclaim_successes", - value: 107788388, - }, - { - name: "zfetchstats_reclaim_failures", - value: 4045329451, - }, - { - name: "zfetchstats_streams_resets", - value: 20989756, - }, - { - name: "zfetchstats_streams_noresets", - value: 503182328, - }, - { - name: "zfetchstats_bogus_streams", - value: 0, - }, - { - name: "vdev_cache_stats_delegations", - value: 0, - }, - { - name: "vdev_cache_stats_hits", - value: 0, - }, - { - name: "vdev_cache_stats_misses", - value: 0, - }, +func getKstatMetricsAll() map[string]interface{} { + otherMetrics := map[string]interface{}{ + "zfetchstats_hits": int64(7812959060), + "zfetchstats_misses": int64(4154484207), + "zfetchstats_colinear_hits": int64(1366368), + "zfetchstats_colinear_misses": int64(4153117839), + "zfetchstats_stride_hits": int64(7309776732), + "zfetchstats_stride_misses": 
int64(222766182), + "zfetchstats_reclaim_successes": int64(107788388), + "zfetchstats_reclaim_failures": int64(4045329451), + "zfetchstats_streams_resets": int64(20989756), + "zfetchstats_streams_noresets": int64(503182328), + "zfetchstats_bogus_streams": int64(0), + "vdev_cache_stats_delegations": int64(0), + "vdev_cache_stats_hits": int64(0), + "vdev_cache_stats_misses": int64(0), } - - return append(getKstatMetricsArcOnly(), otherMetrics...) + arcMetrics := getKstatMetricsArcOnly() + for k, v := range otherMetrics { + arcMetrics[k] = v + } + return arcMetrics } -func getPoolMetrics() []*metrics { - return []*metrics{ - { - name: "nread", - value: 1884160, - }, - { - name: "nwritten", - value: 6450688, - }, - { - name: "reads", - value: 22, - }, - { - name: "writes", - value: 978, - }, - { - name: "wtime", - value: 272187126, - }, - { - name: "wlentime", - value: 2850519036, - }, - { - name: "wupdate", - value: 2263669418655, - }, - { - name: "rtime", - value: 424226814, - }, - { - name: "rlentime", - value: 2850519036, - }, - { - name: "rupdate", - value: 2263669871823, - }, - { - name: "wcnt", - value: 0, - }, - { - name: "rcnt", - value: 0, - }, +func getPoolMetrics() map[string]interface{} { + return map[string]interface{}{ + "nread": int64(1884160), + "nwritten": int64(6450688), + "reads": int64(22), + "writes": int64(978), + "wtime": int64(272187126), + "wlentime": int64(2850519036), + "wupdate": int64(2263669418655), + "rtime": int64(424226814), + "rlentime": int64(2850519036), + "rupdate": int64(2263669871823), + "wcnt": int64(0), + "rcnt": int64(0), } } From b9869eadc3e815744ab31bf57e4d776f18882699 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 01:11:52 -0700 Subject: [PATCH 040/103] 0.3.0 unit tests: influxdb --- plugins/influxdb/influxdb_test.go | 47 +++++++++++++++---------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/plugins/influxdb/influxdb_test.go b/plugins/influxdb/influxdb_test.go index 
a6c9af56a..7911156de 100644 --- a/plugins/influxdb/influxdb_test.go +++ b/plugins/influxdb/influxdb_test.go @@ -72,29 +72,26 @@ func TestBasic(t *testing.T) { require.NoError(t, plugin.Gather(&acc)) require.Len(t, acc.Points, 2) - require.NoError(t, acc.ValidateTaggedFieldsValue( - "foo", - map[string]interface{}{ - // JSON will truncate floats to integer representations. - // Since there's no distinction in JSON, we can't assume it's an int. - "i": -1.0, - "f": 0.5, - "b": true, - "s": "string", - }, - map[string]string{ - "id": "ex1", - "url": fakeServer.URL + "/endpoint", - }, - )) - require.NoError(t, acc.ValidateTaggedFieldsValue( - "bar", - map[string]interface{}{ - "x": "x", - }, - map[string]string{ - "id": "ex2", - "url": fakeServer.URL + "/endpoint", - }, - )) + fields := map[string]interface{}{ + // JSON will truncate floats to integer representations. + // Since there's no distinction in JSON, we can't assume it's an int. + "i": -1.0, + "f": 0.5, + "b": true, + "s": "string", + } + tags := map[string]string{ + "id": "ex1", + "url": fakeServer.URL + "/endpoint", + } + acc.AssertContainsTaggedFields(t, "foo", fields, tags) + + fields = map[string]interface{}{ + "x": "x", + } + tags = map[string]string{ + "id": "ex2", + "url": fakeServer.URL + "/endpoint", + } + acc.AssertContainsTaggedFields(t, "bar", fields, tags) } From 0e398f5802bf81ba780c4cfe0826e9535a66f3ac Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 10:09:04 -0700 Subject: [PATCH 041/103] 0.3.0 unit tests: amon, datadog, librato --- outputs/amon/amon_test.go | 21 +++++++++++---------- outputs/datadog/datadog_test.go | 7 ++++--- outputs/librato/librato_test.go | 22 +++++++++++++++------- 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/outputs/amon/amon_test.go b/outputs/amon/amon_test.go index a220a304d..cfe4e9f23 100644 --- a/outputs/amon/amon_test.go +++ b/outputs/amon/amon_test.go @@ -18,7 +18,7 @@ func TestBuildPoint(t *testing.T) { err error }{ { - 
testutil.TestPoint(float64(0.0)), + testutil.TestPoint(float64(0.0), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 0.0, @@ -26,7 +26,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(float64(1.0)), + testutil.TestPoint(float64(1.0), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 1.0, @@ -34,7 +34,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int(10)), + testutil.TestPoint(int(10), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 10.0, @@ -42,7 +42,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int32(112345)), + testutil.TestPoint(int32(112345), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0, @@ -50,7 +50,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(int64(112345)), + testutil.TestPoint(int64(112345), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0, @@ -58,7 +58,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint(float32(11234.5)), + testutil.TestPoint(float32(11234.5), "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5, @@ -66,7 +66,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestPoint("11234.5"), + testutil.TestPoint("11234.5", "testpt"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5, @@ -75,15 +75,16 @@ func TestBuildPoint(t *testing.T) { }, } for _, tt := range tagtests { - pt, err := buildPoint(tt.ptIn) + pt, err := buildPoints(tt.ptIn) if err != nil && tt.err == nil { t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err) } if tt.err != nil && err == nil { t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error()) } - if !reflect.DeepEqual(pt, tt.outPt) && 
tt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt) + if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", + tt.ptIn.Name(), tt.outPt, pt["value"]) } } } diff --git a/outputs/datadog/datadog_test.go b/outputs/datadog/datadog_test.go index 49da0a7b3..fe0b7c1fe 100644 --- a/outputs/datadog/datadog_test.go +++ b/outputs/datadog/datadog_test.go @@ -162,15 +162,16 @@ func TestBuildPoint(t *testing.T) { }, } for _, tt := range tagtests { - pt, err := buildPoint(tt.ptIn) + pt, err := buildPoints(tt.ptIn) if err != nil && tt.err == nil { t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err) } if tt.err != nil && err == nil { t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error()) } - if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt) + if !reflect.DeepEqual(pt["value"], tt.outPt) && tt.err == nil { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", + tt.ptIn.Name(), tt.outPt, pt["value"]) } } } diff --git a/outputs/librato/librato_test.go b/outputs/librato/librato_test.go index 71d726769..129352027 100644 --- a/outputs/librato/librato_test.go +++ b/outputs/librato/librato_test.go @@ -142,15 +142,20 @@ func TestBuildGauge(t *testing.T) { l := NewLibrato(fakeUrl) for _, gt := range gaugeTests { - gauge, err := l.buildGauge(gt.ptIn) + gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) } if gt.err != nil && err == nil { - t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) + t.Errorf("%s: expected an error (%s) but none returned", + gt.ptIn.Name(), gt.err.Error()) } - if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge) + if len(gauges) == 0 { + continue + } + if gt.err == 
nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", + gt.ptIn.Name(), gt.outGauge, gauges[0]) } } } @@ -198,15 +203,18 @@ func TestBuildGaugeWithSource(t *testing.T) { l := NewLibrato(fakeUrl) l.SourceTag = "hostname" for _, gt := range gaugeTests { - gauge, err := l.buildGauge(gt.ptIn) + gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err) } if gt.err != nil && err == nil { t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error()) } - if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil { - t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge) + if len(gauges) == 0 { + continue + } + if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) { + t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0]) } } } From 4fdcb136bcc5d282ed5e3daaabf1b51c23356769 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 10:23:38 -0700 Subject: [PATCH 042/103] 0.3.0 unit tests: internal --- internal/config/config_test.go | 22 +++++++------------ internal/config/testdata/subconfig/exec.conf | 2 -- .../config/testdata/subconfig/procstat.conf | 3 --- 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f6b929976..eeb1f7fae 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -39,6 +39,7 @@ func TestConfig_LoadSinglePlugin(t *testing.T) { }, Interval: 5 * time.Second, } + mConfig.Tags = make(map[string]string) assert.Equal(t, memcached, c.Plugins[0].Plugin, "Testdata did not produce a correct memcached struct.") @@ -81,19 +82,18 @@ func TestConfig_LoadDirectory(t *testing.T) { }, Interval: 5 * time.Second, } + mConfig.Tags = make(map[string]string) + assert.Equal(t, memcached, c.Plugins[0].Plugin, "Testdata did not produce a correct memcached struct.") 
assert.Equal(t, mConfig, c.Plugins[0].Config, "Testdata did not produce correct memcached metadata.") ex := plugins.Plugins["exec"]().(*exec.Exec) - ex.Commands = []*exec.Command{ - &exec.Command{ - Command: "/usr/bin/myothercollector --foo=bar", - Name: "myothercollector", - }, - } + ex.Command = "/usr/bin/myothercollector --foo=bar" + ex.Name = "myothercollector" eConfig := &PluginConfig{Name: "exec"} + eConfig.Tags = make(map[string]string) assert.Equal(t, ex, c.Plugins[1].Plugin, "Merged Testdata did not produce a correct exec struct.") assert.Equal(t, eConfig, c.Plugins[1].Config, @@ -106,16 +106,10 @@ func TestConfig_LoadDirectory(t *testing.T) { "Testdata did not produce correct memcached metadata.") pstat := plugins.Plugins["procstat"]().(*procstat.Procstat) - pstat.Specifications = []*procstat.Specification{ - &procstat.Specification{ - PidFile: "/var/run/grafana-server.pid", - }, - &procstat.Specification{ - PidFile: "/var/run/influxdb/influxd.pid", - }, - } + pstat.PidFile = "/var/run/grafana-server.pid" pConfig := &PluginConfig{Name: "procstat"} + pConfig.Tags = make(map[string]string) assert.Equal(t, pstat, c.Plugins[3].Plugin, "Merged Testdata did not produce a correct procstat struct.") diff --git a/internal/config/testdata/subconfig/exec.conf b/internal/config/testdata/subconfig/exec.conf index 552441031..80aca00e8 100644 --- a/internal/config/testdata/subconfig/exec.conf +++ b/internal/config/testdata/subconfig/exec.conf @@ -1,6 +1,4 @@ [[plugins.exec]] - # specify commands via an array of tables - [[plugins.exec.commands]] # the command to run command = "/usr/bin/myothercollector --foo=bar" diff --git a/internal/config/testdata/subconfig/procstat.conf b/internal/config/testdata/subconfig/procstat.conf index 33f288d84..9f23e8d58 100644 --- a/internal/config/testdata/subconfig/procstat.conf +++ b/internal/config/testdata/subconfig/procstat.conf @@ -1,5 +1,2 @@ [[plugins.procstat]] - [[plugins.procstat.specifications]] pid_file = 
"/var/run/grafana-server.pid" - [[plugins.procstat.specifications]] - pid_file = "/var/run/influxdb/influxd.pid" From 64b98a9b61ba85f975823aab0ae6e7c59c3b323e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 10:52:46 -0700 Subject: [PATCH 043/103] 0.3.0 unit tests: agent and prometheus --- agent_test.go | 41 +++++++++---- internal/config/testdata/telegraf-agent.toml | 61 ++++++------------- .../prometheus_client_test.go | 10 +-- 3 files changed, 53 insertions(+), 59 deletions(-) diff --git a/agent_test.go b/agent_test.go index 7dd65ef26..c08fbe292 100644 --- a/agent_test.go +++ b/agent_test.go @@ -16,31 +16,36 @@ import ( func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() c.PluginFilters = []string{"mysql"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 1, len(a.Config.Plugins)) c = config.NewConfig() c.PluginFilters = []string{"foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Plugins)) c = config.NewConfig() c.PluginFilters = []string{"mysql", "foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 1, len(a.Config.Plugins)) c = config.NewConfig() c.PluginFilters = []string{"mysql", "redis"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Plugins)) c = config.NewConfig() c.PluginFilters = []string{"mysql", "foo", "redis", "bar"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = 
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Plugins)) } @@ -48,37 +53,51 @@ func TestAgent_LoadPlugin(t *testing.T) { func TestAgent_LoadOutput(t *testing.T) { c := config.NewConfig() c.OutputFilters = []string{"influxdb"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) + c = config.NewConfig() + c.OutputFilters = []string{"kafka"} + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) + a, _ = NewAgent(c) + assert.Equal(t, 1, len(a.Config.Outputs)) + c = config.NewConfig() c.OutputFilters = []string{} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "kafka"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) + assert.Equal(t, 3, len(c.Outputs)) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = 
[]string{"influxdb", "foo", "kafka", "bar"} - c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") + assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) } diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index 42ce89cd8..7b8cdb2bb 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -21,20 +21,13 @@ # Tags can also be specified via a normal map, but only one form at a time: [tags] - # dc = "us-east-1" + dc = "us-east-1" # Configuration for telegraf agent [agent] # Default data collection interval for all plugins interval = "10s" - # If utc = false, uses local time (utc is highly recommended) - utc = true - - # Precision of writes, valid values are n, u, ms, s, m, and h - # note: using second precision greatly helps InfluxDB compression - precision = "s" - # run telegraf in debug mode debug = false @@ -58,17 +51,6 @@ # The target database for metrics. This database must already exist database = "telegraf" # required. - # Connection timeout (for the connection with InfluxDB), formatted as a string. - # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # If not provided, will default to 0 (no timeout) - # timeout = "5s" - - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - # Set the user agent for the POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - [[outputs.influxdb]] urls = ["udp://localhost:8089"] database = "udp-telegraf" @@ -92,8 +74,8 @@ # Read Apache status information (mod_status) [[plugins.apache]] -# An array of Apache status URI to gather stats. -urls = ["http://localhost/server-status?auto"] + # An array of Apache status URI to gather stats. 
+ urls = ["http://localhost/server-status?auto"] # Read metrics about cpu usage [[plugins.cpu]] @@ -128,8 +110,6 @@ urls = ["http://localhost/server-status?auto"] # Read flattened metrics from one or more commands that output JSON to stdout [[plugins.exec]] - # specify commands via an array of tables - [[exec.commands]] # the command to run command = "/usr/bin/mycollector --foo=bar" @@ -148,28 +128,25 @@ urls = ["http://localhost/server-status?auto"] # Read flattened metrics from one or more JSON HTTP endpoints [[plugins.httpjson]] - # Specify services via an array of tables - [[httpjson.services]] + # a name for the service being polled + name = "webserver_stats" - # a name for the service being polled - name = "webserver_stats" + # URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] - # URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] + # HTTP method to use (case-sensitive) + method = "GET" - # HTTP method to use (case-sensitive) - method = "GET" - - # HTTP parameters (all values must be strings) - [httpjson.services.parameters] - event_type = "cpu_spike" - threshold = "0.75" + # HTTP parameters (all values must be strings) + [httpjson.parameters] + event_type = "cpu_spike" + threshold = "0.75" # Read metrics about disk IO by device -[[plugins.io]] +[[plugins.diskio]] # no configuration # read metrics from a Kafka topic @@ -261,9 +238,6 @@ urls = ["http://localhost/server-status?auto"] # Read metrics from one or many postgresql servers [[plugins.postgresql]] - # specify servers via an array of tables - [[postgresql.servers]] - # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: @@ -297,7 +271,6 @@ urls = ["http://localhost/server-status?auto"] # Read metrics from one or many RabbitMQ servers via the 
management API [[plugins.rabbitmq]] # Specify servers via an array of tables - [[rabbitmq.servers]] # name = "rmq-server-1" # optional tag # url = "http://localhost:15672" # username = "guest" diff --git a/outputs/prometheus_client/prometheus_client_test.go b/outputs/prometheus_client/prometheus_client_test.go index 6bb1ec614..53adcac17 100644 --- a/outputs/prometheus_client/prometheus_client_test.go +++ b/outputs/prometheus_client/prometheus_client_test.go @@ -3,11 +3,11 @@ package prometheus_client import ( "testing" + "github.com/stretchr/testify/require" + "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/plugins/prometheus" "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var pTesting *PrometheusClient @@ -48,7 +48,8 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected { - assert.NoError(t, acc.ValidateValue(e.name, e.value)) + acc.AssertContainsFields(t, "prometheus_"+e.name, + map[string]interface{}{"value": e.value}) } } @@ -88,7 +89,8 @@ func TestPrometheusWritePointTag(t *testing.T) { require.NoError(t, p.Gather(&acc)) for _, e := range expected { - assert.True(t, acc.CheckTaggedValue(e.name, e.value, tags)) + acc.AssertContainsFields(t, "prometheus_"+e.name, + map[string]interface{}{"value": e.value}) } } From ad4af06802b1996a5ec317011346cdb682dad450 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 10:58:53 -0700 Subject: [PATCH 044/103] Update Makefile and Godeps and various fixups --- Godeps | 38 +++++++++++++++++------------------ circle.yml | 6 +++--- plugins/system/docker_test.go | 1 - 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/Godeps b/Godeps index d17d8dd25..2584179bd 100644 --- a/Godeps +++ b/Godeps @@ -1,51 +1,51 @@ git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034 -github.com/Shopify/sarama 
159e9990b0796511607dd0d7aaa3eb37d1829d16 +github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81 github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 -github.com/armon/go-metrics 06b60999766278efd6d2b5d8418a58c3d5b99e87 -github.com/aws/aws-sdk-go 999b1591218c36d5050d1ba7266eba956e65965f +github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757 +github.com/aws/aws-sdk-go f09322ae1e6468fe828c862542389bc45baf3c00 github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d -github.com/boltdb/bolt b34b35ea8d06bb9ae69d9a349119252e4c1d8ee0 +github.com/boltdb/bolt 34a0fa5307f7562980fb8e7ff4723f7987edf49b github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d -github.com/eapache/go-resiliency f341fb4dca45128e4aa86389fa6a675d55fe25e1 +github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 7177a9e3543b0891a5d91dbf7051e0f71455c8ef -github.com/go-ini/ini 9314fb0ef64171d6a3d0a4fa570dfa33441cba05 -github.com/go-sql-driver/mysql d512f204a577a4ab037a1816604c48c9c13210be -github.com/gogo/protobuf e492fd34b12d0230755c45aa5fb1e1eea6a84aa9 -github.com/golang/protobuf 68415e7123da32b07eab49c96d2c4d6158360e9b +github.com/fsouza/go-dockerclient 175e1df973274f04e9b459a62cffc49808f1a649 +github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 +github.com/go-sql-driver/mysql 7a8740a6bd8feb6af5786ab9a9f1513970019d8c +github.com/gogo/protobuf 7b1331554dbe882cb3613ee8f1824a5583627963 +github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 
-github.com/hailocab/go-hostpool 0637eae892be221164aff5fcbccc57171aea6406 +github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64 github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee -github.com/influxdb/influxdb 69a7664f2d4b75aec300b7cbfc7e57c971721f04 +github.com/influxdb/influxdb bd63489ef0faae2465ae5b1f0a28bd7e71e02e38 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 -github.com/klauspost/crc32 0aff1ea9c20474c3901672b5b6ead0ac611156de +github.com/klauspost/crc32 a3b15ae34567abb20a22992b989cd76f48d09c47 github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f -github.com/pborman/uuid cccd189d45f7ac3368a0d127efb7f4d08ae0b655 +github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 github.com/pmezard/go-difflib e8554b8641db39598be7f6342874b958f12ae1d4 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common 56b90312e937d43b930f06a59bf0d6a4ae1944bc +github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil fc932d9090f13a84fb4b3cb8baa124610cab184c +github.com/shirou/gopsutil ef151b7ff7fe76308f89a389447b7b78dfa02e0f github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 -github.com/stretchr/testify e3a8ff8ce36581f87a15341206f205b1da467059 +github.com/stretchr/testify c92828f29518bc633893affbce12904ba41a7cfa github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 -golang.org/x/crypto 7b85b097bf7527677d54d3220065e966a0e3b613 -golang.org/x/net 1796f9b8b7178e3c7587dff118d3bb9d37f9b0b3 +golang.org/x/crypto f23ba3a5ee43012fcb4b92e1a2a405a92554f4f2 +golang.org/x/net 520af5de654dc4dd4f0f65aa40e66dbbd9043df1 gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49 diff --git a/circle.yml b/circle.yml index c1c2d35ab..2d006d70e 100644 --- a/circle.yml +++ b/circle.yml @@ -4,9 +4,9 @@ machine: post: - sudo service zookeeper stop - go version - - go version | grep 1.5.1 || sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.5.1.linux-amd64.tar.gz + - go version | grep 1.5.2 || sudo rm -rf /usr/local/go + - wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.5.2.linux-amd64.tar.gz - go version dependencies: diff --git a/plugins/system/docker_test.go b/plugins/system/docker_test.go index eb9cb41c7..9ed06dd3e 100644 --- a/plugins/system/docker_test.go +++ b/plugins/system/docker_test.go @@ -9,7 +9,6 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/docker" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) From 30d24a3c1c9e8b2fbc50189d4300f57ff8d926ca Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 13:02:59 -0700 Subject: [PATCH 045/103] 0.3.0 documentation changes and improvements --- CONFIGURATION.md | 19 ++++++++++++++----- 1 file changed, 14 
insertions(+), 5 deletions(-) diff --git a/CONFIGURATION.md b/CONFIGURATION.md index c1040df2d..c7b785edc 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -1,5 +1,14 @@ # Telegraf Configuration +## Generating a config file + +A default Telegraf config file can be generated using the `-sample-config` flag, +like this: `telegraf -sample-config` + +To generate a file with specific collectors and outputs, you can use the +`-filter` and `-outputfilter` flags, like this: +`telegraf -sample-config -filter cpu:mem:net:swap -outputfilter influxdb:kafka` + ## Plugin Configuration There are some configuration options that are configurable per plugin: @@ -9,6 +18,9 @@ There are some configuration options that are configurable per plugin: * **name_prefix**: Specifies a prefix to attach to the measurement name. * **name_suffix**: Specifies a suffix to attach to the measurement name. * **tags**: A map of tags to apply to a specific plugin's measurements. +* **interval**: How often to gather this metric. Normal plugins use a single +global interval, but if one particular plugin should be run less or more often, +you can configure that here. ### Plugin Filters @@ -21,11 +33,8 @@ and if it matches, the field is emitted. * **tagpass**: tag names and arrays of strings that are used to filter measurements by the current plugin. Each string in the array is tested as a glob match against the tag name, and if it matches the measurement is emitted. -* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted. -This is tested on measurements that have passed the tagpass test. -* **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular plugin should be run less or more often, -you can configure that here. +* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not +emitted. This is tested on measurements that have passed the tagpass test. 
### Plugin Configuration Examples From 9c5db1057d700f65f5b97ede6dbac0c5217a34ec Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 13:39:43 -0700 Subject: [PATCH 046/103] renaming plugins -> inputs --- CHANGELOG.md | 23 +-- CONFIGURATION.md | 32 ++-- CONTRIBUTING.md | 28 +-- README.md | 6 +- accumulator.go | 26 +-- agent.go | 92 +++++----- agent_test.go | 24 +-- circle.yml | 2 - cmd/telegraf/telegraf.go | 73 ++++++-- etc/telegraf.conf | 20 +-- internal/config/config.go | 162 +++++++++--------- internal/config/config_test.go | 50 +++--- internal/config/testdata/single_plugin.toml | 6 +- internal/config/testdata/subconfig/exec.conf | 6 +- .../config/testdata/subconfig/memcached.conf | 6 +- .../config/testdata/subconfig/procstat.conf | 2 +- internal/config/testdata/telegraf-agent.toml | 60 ++++--- outputs/all/all.go | 16 -- plugins/all/all.go | 37 ---- plugins/{ => inputs}/aerospike/README.md | 0 plugins/{ => inputs}/aerospike/aerospike.go | 10 +- .../{ => inputs}/aerospike/aerospike_test.go | 0 plugins/inputs/all/all.go | 37 ++++ plugins/{ => inputs}/apache/README.md | 0 plugins/{ => inputs}/apache/apache.go | 8 +- plugins/{ => inputs}/apache/apache_test.go | 0 plugins/{ => inputs}/bcache/README.md | 12 +- plugins/{ => inputs}/bcache/bcache.go | 8 +- plugins/{ => inputs}/bcache/bcache_test.go | 0 plugins/{ => inputs}/disque/disque.go | 8 +- plugins/{ => inputs}/disque/disque_test.go | 0 plugins/{ => inputs}/elasticsearch/README.md | 0 .../elasticsearch/elasticsearch.go | 10 +- .../elasticsearch/elasticsearch_test.go | 0 .../elasticsearch/testdata_test.go | 0 plugins/{ => inputs}/exec/README.md | 0 plugins/{ => inputs}/exec/exec.go | 19 +- plugins/{ => inputs}/exec/exec_test.go | 5 +- plugins/{ => inputs}/haproxy/haproxy.go | 10 +- plugins/{ => inputs}/haproxy/haproxy_test.go | 0 plugins/{ => inputs}/httpjson/README.md | 0 plugins/{ => inputs}/httpjson/httpjson.go | 10 +- .../{ => inputs}/httpjson/httpjson_test.go | 0 plugins/{ => 
inputs}/influxdb/README.md | 2 +- plugins/{ => inputs}/influxdb/influxdb.go | 8 +- .../{ => inputs}/influxdb/influxdb_test.go | 2 +- plugins/{ => inputs}/jolokia/README.md | 0 plugins/{ => inputs}/jolokia/jolokia.go | 10 +- plugins/{ => inputs}/jolokia/jolokia_test.go | 0 plugins/{ => inputs}/kafka_consumer/README.md | 0 .../kafka_consumer/kafka_consumer.go | 6 +- .../kafka_consumer_integration_test.go | 0 .../kafka_consumer/kafka_consumer_test.go | 0 plugins/{ => inputs}/leofs/leofs.go | 8 +- plugins/{ => inputs}/leofs/leofs_test.go | 0 plugins/{ => inputs}/lustre2/lustre2.go | 8 +- plugins/{ => inputs}/lustre2/lustre2_test.go | 0 plugins/{ => inputs}/mailchimp/chimp_api.go | 0 plugins/{ => inputs}/mailchimp/mailchimp.go | 8 +- .../{ => inputs}/mailchimp/mailchimp_test.go | 0 plugins/{ => inputs}/memcached/memcached.go | 8 +- .../{ => inputs}/memcached/memcached_test.go | 0 plugins/{ => inputs}/mock_Plugin.go | 2 +- plugins/{ => inputs}/mongodb/mongodb.go | 8 +- plugins/{ => inputs}/mongodb/mongodb_data.go | 4 +- .../{ => inputs}/mongodb/mongodb_data_test.go | 0 .../{ => inputs}/mongodb/mongodb_server.go | 4 +- .../mongodb/mongodb_server_test.go | 0 plugins/{ => inputs}/mongodb/mongodb_test.go | 0 plugins/{ => inputs}/mongodb/mongostat.go | 0 plugins/{ => inputs}/mysql/mysql.go | 8 +- plugins/{ => inputs}/mysql/mysql_test.go | 0 plugins/{ => inputs}/mysql/parse_dsn.go | 0 plugins/{ => inputs}/nginx/nginx.go | 8 +- plugins/{ => inputs}/nginx/nginx_test.go | 0 plugins/{ => inputs}/phpfpm/README.md | 2 +- plugins/{ => inputs}/phpfpm/phpfpm.go | 10 +- plugins/{ => inputs}/phpfpm/phpfpm_fcgi.go | 0 plugins/{ => inputs}/phpfpm/phpfpm_test.go | 0 plugins/{ => inputs}/ping/ping.go | 8 +- plugins/{ => inputs}/ping/ping_test.go | 0 plugins/{ => inputs}/postgresql/README.md | 0 plugins/{ => inputs}/postgresql/postgresql.go | 8 +- .../postgresql/postgresql_test.go | 0 plugins/{ => inputs}/procstat/README.md | 0 plugins/{ => inputs}/procstat/procstat.go | 6 +- .../{ => 
inputs}/procstat/procstat_test.go | 0 .../{ => inputs}/procstat/spec_processor.go | 6 +- plugins/{ => inputs}/prometheus/prometheus.go | 8 +- .../prometheus/prometheus_test.go | 0 plugins/{ => inputs}/puppetagent/README.md | 0 .../puppetagent/last_run_summary.yaml | 0 .../{ => inputs}/puppetagent/puppetagent.go | 8 +- .../puppetagent/puppetagent_test.go | 0 plugins/{ => inputs}/rabbitmq/rabbitmq.go | 14 +- .../{ => inputs}/rabbitmq/rabbitmq_test.go | 0 plugins/{ => inputs}/redis/redis.go | 12 +- plugins/{ => inputs}/redis/redis_test.go | 0 plugins/{ => inputs}/registry.go | 26 +-- plugins/{ => inputs}/rethinkdb/rethinkdb.go | 8 +- .../{ => inputs}/rethinkdb/rethinkdb_data.go | 6 +- .../rethinkdb/rethinkdb_data_test.go | 0 .../rethinkdb/rethinkdb_server.go | 10 +- .../rethinkdb/rethinkdb_server_test.go | 0 .../{ => inputs}/rethinkdb/rethinkdb_test.go | 0 plugins/{ => inputs}/statsd/README.md | 0 plugins/{ => inputs}/statsd/running_stats.go | 0 .../{ => inputs}/statsd/running_stats_test.go | 0 plugins/{ => inputs}/statsd/statsd.go | 6 +- plugins/{ => inputs}/statsd/statsd_test.go | 0 plugins/{ => inputs}/system/CPU_README.md | 0 plugins/{ => inputs}/system/MEM_README.md | 0 plugins/{ => inputs}/system/NETSTAT_README.md | 0 plugins/{ => inputs}/system/cpu.go | 6 +- plugins/{ => inputs}/system/cpu_test.go | 0 plugins/{ => inputs}/system/disk.go | 10 +- plugins/{ => inputs}/system/disk_test.go | 0 plugins/{ => inputs}/system/docker.go | 6 +- plugins/{ => inputs}/system/docker_test.go | 0 plugins/{ => inputs}/system/memory.go | 10 +- plugins/{ => inputs}/system/memory_test.go | 0 plugins/{ => inputs}/system/mock_PS.go | 0 plugins/{ => inputs}/system/net.go | 6 +- plugins/{ => inputs}/system/net_test.go | 0 plugins/{ => inputs}/system/netstat.go | 6 +- plugins/{ => inputs}/system/ps.go | 4 +- plugins/{ => inputs}/system/system.go | 6 +- plugins/{ => inputs}/trig/trig.go | 6 +- plugins/{ => inputs}/trig/trig_test.go | 0 plugins/{ => inputs}/twemproxy/twemproxy.go | 12 +- 
.../{ => inputs}/twemproxy/twemproxy_test.go | 0 plugins/{ => inputs}/zfs/README.md | 0 plugins/{ => inputs}/zfs/zfs.go | 8 +- plugins/{ => inputs}/zfs/zfs_test.go | 0 plugins/{ => inputs}/zookeeper/README.md | 0 plugins/{ => inputs}/zookeeper/zookeeper.go | 8 +- .../{ => inputs}/zookeeper/zookeeper_test.go | 0 plugins/outputs/all/all.go | 16 ++ {outputs => plugins/outputs}/amon/README.md | 0 {outputs => plugins/outputs}/amon/amon.go | 2 +- .../outputs}/amon/amon_test.go | 0 {outputs => plugins/outputs}/amqp/README.md | 0 {outputs => plugins/outputs}/amqp/amqp.go | 2 +- .../outputs}/amqp/amqp_test.go | 0 .../outputs}/datadog/README.md | 0 .../outputs}/datadog/datadog.go | 2 +- .../outputs}/datadog/datadog_test.go | 0 .../outputs}/influxdb/README.md | 0 .../outputs}/influxdb/influxdb.go | 2 +- .../outputs}/influxdb/influxdb_test.go | 0 {outputs => plugins/outputs}/kafka/kafka.go | 2 +- .../outputs}/kafka/kafka_test.go | 0 .../outputs}/kinesis/README.md | 0 .../outputs}/kinesis/kinesis.go | 2 +- .../outputs}/kinesis/kinesis_test.go | 0 .../outputs}/librato/README.md | 0 .../outputs}/librato/librato.go | 2 +- .../outputs}/librato/librato_test.go | 0 {outputs => plugins/outputs}/mqtt/mqtt.go | 2 +- .../outputs}/mqtt/mqtt_test.go | 0 {outputs => plugins/outputs}/nsq/README.md | 0 {outputs => plugins/outputs}/nsq/nsq.go | 2 +- {outputs => plugins/outputs}/nsq/nsq_test.go | 0 .../outputs}/opentsdb/README.md | 0 .../outputs}/opentsdb/opentsdb.go | 2 +- .../outputs}/opentsdb/opentsdb_test.go | 0 .../outputs}/prometheus_client/README.md | 0 .../prometheus_client/prometheus_client.go | 2 +- .../prometheus_client_test.go | 2 +- {outputs => plugins/outputs}/registry.go | 0 .../outputs}/riemann/riemann.go | 2 +- .../outputs}/riemann/riemann_test.go | 0 scripts/circle-test.sh | 2 +- scripts/init.sh | 4 +- scripts/telegraf.service | 2 +- 175 files changed, 606 insertions(+), 572 deletions(-) delete mode 100644 outputs/all/all.go delete mode 100644 plugins/all/all.go rename 
plugins/{ => inputs}/aerospike/README.md (100%) rename plugins/{ => inputs}/aerospike/aerospike.go (96%) rename plugins/{ => inputs}/aerospike/aerospike_test.go (100%) create mode 100644 plugins/inputs/all/all.go rename plugins/{ => inputs}/apache/README.md (100%) rename plugins/{ => inputs}/apache/apache.go (93%) rename plugins/{ => inputs}/apache/apache_test.go (100%) rename plugins/{ => inputs}/bcache/README.md (97%) rename plugins/{ => inputs}/bcache/bcache.go (92%) rename plugins/{ => inputs}/bcache/bcache_test.go (100%) rename plugins/{ => inputs}/disque/disque.go (94%) rename plugins/{ => inputs}/disque/disque_test.go (100%) rename plugins/{ => inputs}/elasticsearch/README.md (100%) rename plugins/{ => inputs}/elasticsearch/elasticsearch.go (95%) rename plugins/{ => inputs}/elasticsearch/elasticsearch_test.go (100%) rename plugins/{ => inputs}/elasticsearch/testdata_test.go (100%) rename plugins/{ => inputs}/exec/README.md (100%) rename plugins/{ => inputs}/exec/exec.go (78%) rename plugins/{ => inputs}/exec/exec_test.go (93%) rename plugins/{ => inputs}/haproxy/haproxy.go (97%) rename plugins/{ => inputs}/haproxy/haproxy_test.go (100%) rename plugins/{ => inputs}/httpjson/README.md (100%) rename plugins/{ => inputs}/httpjson/httpjson.go (95%) rename plugins/{ => inputs}/httpjson/httpjson_test.go (100%) rename plugins/{ => inputs}/influxdb/README.md (98%) rename plugins/{ => inputs}/influxdb/influxdb.go (94%) rename plugins/{ => inputs}/influxdb/influxdb_test.go (97%) rename plugins/{ => inputs}/jolokia/README.md (100%) rename plugins/{ => inputs}/jolokia/jolokia.go (94%) rename plugins/{ => inputs}/jolokia/jolokia_test.go (100%) rename plugins/{ => inputs}/kafka_consumer/README.md (100%) rename plugins/{ => inputs}/kafka_consumer/kafka_consumer.go (96%) rename plugins/{ => inputs}/kafka_consumer/kafka_consumer_integration_test.go (100%) rename plugins/{ => inputs}/kafka_consumer/kafka_consumer_test.go (100%) rename plugins/{ => inputs}/leofs/leofs.go (96%) 
rename plugins/{ => inputs}/leofs/leofs_test.go (100%) rename plugins/{ => inputs}/lustre2/lustre2.go (96%) rename plugins/{ => inputs}/lustre2/lustre2_test.go (100%) rename plugins/{ => inputs}/mailchimp/chimp_api.go (100%) rename plugins/{ => inputs}/mailchimp/mailchimp.go (93%) rename plugins/{ => inputs}/mailchimp/mailchimp_test.go (100%) rename plugins/{ => inputs}/memcached/memcached.go (95%) rename plugins/{ => inputs}/memcached/memcached_test.go (100%) rename plugins/{ => inputs}/mock_Plugin.go (92%) rename plugins/{ => inputs}/mongodb/mongodb.go (93%) rename plugins/{ => inputs}/mongodb/mongodb_data.go (96%) rename plugins/{ => inputs}/mongodb/mongodb_data_test.go (100%) rename plugins/{ => inputs}/mongodb/mongodb_server.go (90%) rename plugins/{ => inputs}/mongodb/mongodb_server_test.go (100%) rename plugins/{ => inputs}/mongodb/mongodb_test.go (100%) rename plugins/{ => inputs}/mongodb/mongostat.go (100%) rename plugins/{ => inputs}/mysql/mysql.go (94%) rename plugins/{ => inputs}/mysql/mysql_test.go (100%) rename plugins/{ => inputs}/mysql/parse_dsn.go (100%) rename plugins/{ => inputs}/nginx/nginx.go (93%) rename plugins/{ => inputs}/nginx/nginx_test.go (100%) rename plugins/{ => inputs}/phpfpm/README.md (98%) rename plugins/{ => inputs}/phpfpm/phpfpm.go (94%) rename plugins/{ => inputs}/phpfpm/phpfpm_fcgi.go (100%) rename plugins/{ => inputs}/phpfpm/phpfpm_test.go (100%) rename plugins/{ => inputs}/ping/ping.go (96%) rename plugins/{ => inputs}/ping/ping_test.go (100%) rename plugins/{ => inputs}/postgresql/README.md (100%) rename plugins/{ => inputs}/postgresql/postgresql.go (93%) rename plugins/{ => inputs}/postgresql/postgresql_test.go (100%) rename plugins/{ => inputs}/procstat/README.md (100%) rename plugins/{ => inputs}/procstat/procstat.go (95%) rename plugins/{ => inputs}/procstat/procstat_test.go (100%) rename plugins/{ => inputs}/procstat/spec_processor.go (96%) rename plugins/{ => inputs}/prometheus/prometheus.go (89%) rename plugins/{ => 
inputs}/prometheus/prometheus_test.go (100%) rename plugins/{ => inputs}/puppetagent/README.md (100%) rename plugins/{ => inputs}/puppetagent/last_run_summary.yaml (100%) rename plugins/{ => inputs}/puppetagent/puppetagent.go (92%) rename plugins/{ => inputs}/puppetagent/puppetagent_test.go (100%) rename plugins/{ => inputs}/rabbitmq/rabbitmq.go (94%) rename plugins/{ => inputs}/rabbitmq/rabbitmq_test.go (100%) rename plugins/{ => inputs}/redis/redis.go (95%) rename plugins/{ => inputs}/redis/redis_test.go (100%) rename plugins/{ => inputs}/registry.go (64%) rename plugins/{ => inputs}/rethinkdb/rethinkdb.go (89%) rename plugins/{ => inputs}/rethinkdb/rethinkdb_data.go (95%) rename plugins/{ => inputs}/rethinkdb/rethinkdb_data_test.go (100%) rename plugins/{ => inputs}/rethinkdb/rethinkdb_server.go (94%) rename plugins/{ => inputs}/rethinkdb/rethinkdb_server_test.go (100%) rename plugins/{ => inputs}/rethinkdb/rethinkdb_test.go (100%) rename plugins/{ => inputs}/statsd/README.md (100%) rename plugins/{ => inputs}/statsd/running_stats.go (100%) rename plugins/{ => inputs}/statsd/running_stats_test.go (100%) rename plugins/{ => inputs}/statsd/statsd.go (98%) rename plugins/{ => inputs}/statsd/statsd_test.go (100%) rename plugins/{ => inputs}/system/CPU_README.md (100%) rename plugins/{ => inputs}/system/MEM_README.md (100%) rename plugins/{ => inputs}/system/NETSTAT_README.md (100%) rename plugins/{ => inputs}/system/cpu.go (95%) rename plugins/{ => inputs}/system/cpu_test.go (100%) rename plugins/{ => inputs}/system/disk.go (91%) rename plugins/{ => inputs}/system/disk_test.go (100%) rename plugins/{ => inputs}/system/docker.go (94%) rename plugins/{ => inputs}/system/docker_test.go (100%) rename plugins/{ => inputs}/system/memory.go (85%) rename plugins/{ => inputs}/system/memory_test.go (100%) rename plugins/{ => inputs}/system/mock_PS.go (100%) rename plugins/{ => inputs}/system/net.go (93%) rename plugins/{ => inputs}/system/net_test.go (100%) rename plugins/{ 
=> inputs}/system/netstat.go (90%) rename plugins/{ => inputs}/system/ps.go (97%) rename plugins/{ => inputs}/system/system.go (88%) rename plugins/{ => inputs}/trig/trig.go (79%) rename plugins/{ => inputs}/trig/trig_test.go (100%) rename plugins/{ => inputs}/twemproxy/twemproxy.go (93%) rename plugins/{ => inputs}/twemproxy/twemproxy_test.go (100%) rename plugins/{ => inputs}/zfs/README.md (100%) rename plugins/{ => inputs}/zfs/zfs.go (93%) rename plugins/{ => inputs}/zfs/zfs_test.go (100%) rename plugins/{ => inputs}/zookeeper/README.md (100%) rename plugins/{ => inputs}/zookeeper/zookeeper.go (90%) rename plugins/{ => inputs}/zookeeper/zookeeper_test.go (100%) create mode 100644 plugins/outputs/all/all.go rename {outputs => plugins/outputs}/amon/README.md (100%) rename {outputs => plugins/outputs}/amon/amon.go (98%) rename {outputs => plugins/outputs}/amon/amon_test.go (100%) rename {outputs => plugins/outputs}/amqp/README.md (100%) rename {outputs => plugins/outputs}/amqp/amqp.go (98%) rename {outputs => plugins/outputs}/amqp/amqp_test.go (100%) rename {outputs => plugins/outputs}/datadog/README.md (100%) rename {outputs => plugins/outputs}/datadog/datadog.go (98%) rename {outputs => plugins/outputs}/datadog/datadog_test.go (100%) rename {outputs => plugins/outputs}/influxdb/README.md (100%) rename {outputs => plugins/outputs}/influxdb/influxdb.go (98%) rename {outputs => plugins/outputs}/influxdb/influxdb_test.go (100%) rename {outputs => plugins/outputs}/kafka/kafka.go (97%) rename {outputs => plugins/outputs}/kafka/kafka_test.go (100%) rename {outputs => plugins/outputs}/kinesis/README.md (100%) rename {outputs => plugins/outputs}/kinesis/kinesis.go (98%) rename {outputs => plugins/outputs}/kinesis/kinesis_test.go (100%) rename {outputs => plugins/outputs}/librato/README.md (100%) rename {outputs => plugins/outputs}/librato/librato.go (98%) rename {outputs => plugins/outputs}/librato/librato_test.go (100%) rename {outputs => plugins/outputs}/mqtt/mqtt.go 
(98%) rename {outputs => plugins/outputs}/mqtt/mqtt_test.go (100%) rename {outputs => plugins/outputs}/nsq/README.md (100%) rename {outputs => plugins/outputs}/nsq/nsq.go (96%) rename {outputs => plugins/outputs}/nsq/nsq_test.go (100%) rename {outputs => plugins/outputs}/opentsdb/README.md (100%) rename {outputs => plugins/outputs}/opentsdb/opentsdb.go (98%) rename {outputs => plugins/outputs}/opentsdb/opentsdb_test.go (100%) rename {outputs => plugins/outputs}/prometheus_client/README.md (100%) rename {outputs => plugins/outputs}/prometheus_client/prometheus_client.go (98%) rename {outputs => plugins/outputs}/prometheus_client/prometheus_client_test.go (97%) rename {outputs => plugins/outputs}/registry.go (100%) rename {outputs => plugins/outputs}/riemann/riemann.go (97%) rename {outputs => plugins/outputs}/riemann/riemann_test.go (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6d438319..d9856b5cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,17 @@ ## v0.3.0 [unreleased] ### Release Notes +- **breaking change** `plugins` have been renamed to `inputs`. This was done because +`plugins` is too generic, as there are now also "output plugins", and will likely +be "aggregator plugins" and "filter plugins" in the future. Additionally, +`inputs/` and `outputs/` directories have been placed in the root-level `plugins/` +directory. - **breaking change** the `io` plugin has been renamed `diskio` - **breaking change** Plugin measurements aggregated into a single measurement. - **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration. -- **breaking change** `procstat` plugin has `*cpu*` fields renamed to -`*cpu_time*` -- `twemproxy` plugin: `prefix` option removed. -- `procstat` cpu measurements are now prepended with `cpu_time_` instead of +- **breaking change** `twemproxy` plugin: `prefix` option removed. 
+- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). @@ -18,7 +21,7 @@ aggregated). - Added ability to specify per-plugin tags - Added ability to specify per-plugin measurement suffix and prefix. (`name_prefix` and `name_suffix`) -- Added ability to override base plugin name. (`name_override`) +- Added ability to override base plugin measurement name. (`name_override`) ### Bugfixes @@ -62,11 +65,11 @@ functional. same type can be specified, like this: ``` -[[plugins.cpu]] +[[inputs.cpu]] percpu = false totalcpu = true -[[plugins.cpu]] +[[inputs.cpu]] percpu = true totalcpu = false drop = ["cpu_time"] @@ -93,7 +96,7 @@ same type can be specified, like this: lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bugfixes -- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in plugins. +- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] @@ -154,7 +157,7 @@ be controlled via the `round_interval` and `flush_jitter` config options. - [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou! - Memory plugin: cached and buffered measurements re-added - Logging: additional logging for each collection interval, track the number -of metrics collected and from how many plugins. +of metrics collected and from how many inputs. - [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib! - [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou! - [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive! @@ -187,7 +190,7 @@ will still be backwards compatible if only `url` is specified. 
- The -test flag will now output two metric collections - Support for filtering telegraf outputs on the CLI -- Telegraf will now allow filtering of output sinks on the command-line using the `-outputfilter` -flag, much like how the `-filter` flag works for plugins. +flag, much like how the `-filter` flag works for inputs. - Support for filtering on config-file creation -- Telegraf now supports filtering to -sample-config command. You can now run `telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config diff --git a/CONFIGURATION.md b/CONFIGURATION.md index c7b785edc..fc822d461 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -5,9 +5,9 @@ A default Telegraf config file can be generated using the `-sample-config` flag, like this: `telegraf -sample-config` -To generate a file with specific collectors and outputs, you can use the -`-filter` and `-outputfilter` flags, like this: -`telegraf -sample-config -filter cpu:mem:net:swap -outputfilter influxdb:kafka` +To generate a file with specific inputs and outputs, you can use the +`-input-filter` and `-output-filter` flags, like this: +`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` ## Plugin Configuration @@ -59,7 +59,7 @@ fields which begin with `time_`. # PLUGINS [plugins] -[[plugins.cpu]] +[[inputs.cpu]] percpu = true totalcpu = false # filter all fields beginning with 'time_' @@ -70,16 +70,16 @@ fields which begin with `time_`. ```toml [plugins] -[[plugins.cpu]] +[[inputs.cpu]] percpu = true totalcpu = false drop = ["cpu_time"] # Don't collect CPU data for cpu6 & cpu7 - [plugins.cpu.tagdrop] + [inputs.cpu.tagdrop] cpu = [ "cpu6", "cpu7" ] -[[plugins.disk]] - [plugins.disk.tagpass] +[[inputs.disk]] + [inputs.disk.tagpass] # tagpass conditions are OR, not AND. # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) # then the metric passes @@ -92,13 +92,13 @@ fields which begin with `time_`. 
```toml # Drop all metrics for guest & steal CPU usage -[[plugins.cpu]] +[[inputs.cpu]] percpu = false totalcpu = true drop = ["usage_guest", "usage_steal"] # Only store inode related metrics for disks -[[plugins.disk]] +[[inputs.disk]] pass = ["inodes*"] ``` @@ -107,7 +107,7 @@ fields which begin with `time_`. This plugin will emit measurements with the name `cpu_total` ```toml -[[plugins.cpu]] +[[inputs.cpu]] name_suffix = "_total" percpu = false totalcpu = true @@ -116,7 +116,7 @@ This plugin will emit measurements with the name `cpu_total` This will emit measurements with the name `foobar` ```toml -[[plugins.cpu]] +[[inputs.cpu]] name_override = "foobar" percpu = false totalcpu = true @@ -128,10 +128,10 @@ This plugin will emit measurements with two additional tags: `tag1=foo` and `tag2=bar` ```toml -[[plugins.cpu]] +[[inputs.cpu]] percpu = false totalcpu = true - [plugins.cpu.tags] + [inputs.cpu.tags] tag1 = "foo" tag2 = "bar" ``` @@ -142,11 +142,11 @@ Additional plugins (or outputs) of the same type can be specified, just define more instances in the config file: ```toml -[[plugins.cpu]] +[[inputs.cpu]] percpu = false totalcpu = true -[[plugins.cpu]] +[[inputs.cpu]] percpu = true totalcpu = false drop = ["cpu_time*"] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7723f0605..12dcad1d0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,23 +5,23 @@ which can be found [on our website](http://influxdb.com/community/cla.html) ## Plugins -This section is for developers who want to create new collection plugins. +This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows for operators to pick and chose what is gathered as well as makes it easy for developers to create new ways of generating metrics. Plugin authorship is kept as simple as possible to promote people to develop -and submit new plugins. +and submit new inputs. 
### Plugin Guidelines -* A plugin must conform to the `plugins.Plugin` interface. +* A plugin must conform to the `inputs.Input` interface. * Each generated metric automatically has the name of the plugin that generated it prepended. This is to keep plugins honest. -* Plugins should call `plugins.Add` in their `init` function to register themselves. +* Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdb/telegraf/plugins/all/all.go` file. +`github.com/influxdb/telegraf/plugins/inputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this plugin does. @@ -78,7 +78,7 @@ type Process struct { PID int } -func Gather(acc plugins.Accumulator) error { +func Gather(acc inputs.Accumulator) error { for _, process := range system.Processes() { tags := map[string]string { "pid": fmt.Sprintf("%d", process.Pid), @@ -97,7 +97,7 @@ package simple // simple.go -import "github.com/influxdb/telegraf/plugins" +import "github.com/influxdb/telegraf/plugins/inputs" type Simple struct { Ok bool @@ -111,7 +111,7 @@ func (s *Simple) SampleConfig() string { return "ok = true # indicate if everything is fine" } -func (s *Simple) Gather(acc plugins.Accumulator) error { +func (s *Simple) Gather(acc inputs.Accumulator) error { if s.Ok { acc.Add("state", "pretty good", nil) } else { @@ -122,14 +122,14 @@ func (s *Simple) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("simple", func() plugins.Plugin { return &Simple{} }) + inputs.Add("simple", func() inputs.Input { return &Simple{} }) } ``` ## Service Plugins This section is for developers who want to create new "service" collection -plugins. 
A service plugin differs from a regular plugin in that it operates +inputs. A service plugin differs from a regular plugin in that it operates a background service while Telegraf is running. One example would be the `statsd` plugin, which operates a statsd server. @@ -143,7 +143,7 @@ and `Stop()` methods. ### Service Plugin Guidelines * Same as the `Plugin` guidelines, except that they must conform to the -`plugins.ServicePlugin` interface. +`inputs.ServiceInput` interface. ### Service Plugin interface @@ -169,7 +169,7 @@ similar constructs. * Outputs should call `outputs.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdb/telegraf/outputs/all/all.go` file. +`github.com/influxdb/telegraf/plugins/outputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the output can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this output does. @@ -193,7 +193,7 @@ package simpleoutput // simpleoutput.go -import "github.com/influxdb/telegraf/outputs" +import "github.com/influxdb/telegraf/plugins/outputs" type Simple struct { Ok bool @@ -243,7 +243,7 @@ and `Stop()` methods. ### Service Output Guidelines * Same as the `Output` guidelines, except that they must conform to the -`plugins.ServiceOutput` interface. +`inputs.ServiceOutput` interface. ### Service Output interface diff --git a/README.md b/README.md index aefcece44..46f865951 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). We'll eagerly accept pull requests for new plugins and will manage the set of plugins that Telegraf supports. See the [contributing guide](CONTRIBUTING.md) for instructions on -writing new plugins. +writing new inputs. ## Installation: @@ -92,7 +92,7 @@ if you don't have it already. 
You also must build with golang version 1.4+. ### How to use it: * Run `telegraf -sample-config > telegraf.conf` to create an initial configuration. -* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`. +* Or run `telegraf -sample-config -input-filter cpu:mem -output-filter influxdb > telegraf.conf`. to create a config file with only CPU and memory plugins defined, and InfluxDB output defined. * Edit the configuration to match your needs. @@ -100,7 +100,7 @@ output defined. sample to STDOUT. NOTE: you may want to run as the telegraf user if you are using the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test` * Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs. -* Run `telegraf -config telegraf.conf -filter system:swap`. +* Run `telegraf -config telegraf.conf -input-filter system:swap`. to run telegraf with only the system & swap plugins defined in the config. ## Telegraf Options diff --git a/accumulator.go b/accumulator.go index f14df63f7..429f3a42c 100644 --- a/accumulator.go +++ b/accumulator.go @@ -29,12 +29,12 @@ type Accumulator interface { } func NewAccumulator( - pluginConfig *config.PluginConfig, + inputConfig *config.InputConfig, points chan *client.Point, ) Accumulator { acc := accumulator{} acc.points = points - acc.pluginConfig = pluginConfig + acc.inputConfig = inputConfig return &acc } @@ -47,7 +47,7 @@ type accumulator struct { debug bool - pluginConfig *config.PluginConfig + inputConfig *config.InputConfig prefix string } @@ -73,27 +73,27 @@ func (ac *accumulator) AddFields( return } - if !ac.pluginConfig.Filter.ShouldTagsPass(tags) { + if !ac.inputConfig.Filter.ShouldTagsPass(tags) { return } // Override measurement name if set - if len(ac.pluginConfig.NameOverride) != 0 { - measurement = ac.pluginConfig.NameOverride + if len(ac.inputConfig.NameOverride) != 0 { + measurement = ac.inputConfig.NameOverride } // Apply measurement prefix and suffix if set - if 
len(ac.pluginConfig.MeasurementPrefix) != 0 { - measurement = ac.pluginConfig.MeasurementPrefix + measurement + if len(ac.inputConfig.MeasurementPrefix) != 0 { + measurement = ac.inputConfig.MeasurementPrefix + measurement } - if len(ac.pluginConfig.MeasurementSuffix) != 0 { - measurement = measurement + ac.pluginConfig.MeasurementSuffix + if len(ac.inputConfig.MeasurementSuffix) != 0 { + measurement = measurement + ac.inputConfig.MeasurementSuffix } if tags == nil { tags = make(map[string]string) } // Apply plugin-wide tags if set - for k, v := range ac.pluginConfig.Tags { + for k, v := range ac.inputConfig.Tags { if _, ok := tags[k]; !ok { tags[k] = v } @@ -108,8 +108,8 @@ func (ac *accumulator) AddFields( result := make(map[string]interface{}) for k, v := range fields { // Filter out any filtered fields - if ac.pluginConfig != nil { - if !ac.pluginConfig.Filter.ShouldPass(k) { + if ac.inputConfig != nil { + if !ac.inputConfig.Filter.ShouldPass(k) { continue } } diff --git a/agent.go b/agent.go index 1658027a7..1af2a1f7c 100644 --- a/agent.go +++ b/agent.go @@ -10,8 +10,8 @@ import ( "time" "github.com/influxdb/telegraf/internal/config" - "github.com/influxdb/telegraf/outputs" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdb/telegraf/plugins/outputs" "github.com/influxdb/influxdb/client/v2" ) @@ -85,33 +85,33 @@ func (a *Agent) Close() error { return err } -// gatherParallel runs the plugins that are using the same reporting interval +// gatherParallel runs the inputs that are using the same reporting interval // as the telegraf agent. 
func (a *Agent) gatherParallel(pointChan chan *client.Point) error { var wg sync.WaitGroup start := time.Now() counter := 0 - for _, plugin := range a.Config.Plugins { - if plugin.Config.Interval != 0 { + for _, input := range a.Config.Inputs { + if input.Config.Interval != 0 { continue } wg.Add(1) counter++ - go func(plugin *config.RunningPlugin) { + go func(input *config.RunningInput) { defer wg.Done() - acc := NewAccumulator(plugin.Config, pointChan) + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - // acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(input.Name + "_") acc.SetDefaultTags(a.Config.Tags) - if err := plugin.Plugin.Gather(acc); err != nil { - log.Printf("Error in plugin [%s]: %s", plugin.Name, err) + if err := input.Input.Gather(acc); err != nil { + log.Printf("Error in input [%s]: %s", input.Name, err) } - }(plugin) + }(input) } if counter == 0 { @@ -121,36 +121,36 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { wg.Wait() elapsed := time.Since(start) - log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n", - a.Config.Agent.Interval, counter, elapsed) + log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n", + a.Config.Agent.Interval.Duration, counter, elapsed) return nil } -// gatherSeparate runs the plugins that have been configured with their own +// gatherSeparate runs the inputs that have been configured with their own // reporting interval. 
func (a *Agent) gatherSeparate( shutdown chan struct{}, - plugin *config.RunningPlugin, + input *config.RunningInput, pointChan chan *client.Point, ) error { - ticker := time.NewTicker(plugin.Config.Interval) + ticker := time.NewTicker(input.Config.Interval) for { var outerr error start := time.Now() - acc := NewAccumulator(plugin.Config, pointChan) + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - // acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(input.Name + "_") acc.SetDefaultTags(a.Config.Tags) - if err := plugin.Plugin.Gather(acc); err != nil { - log.Printf("Error in plugin [%s]: %s", plugin.Name, err) + if err := input.Input.Gather(acc); err != nil { + log.Printf("Error in input [%s]: %s", input.Name, err) } elapsed := time.Since(start) log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n", - plugin.Config.Interval, plugin.Name, elapsed) + input.Config.Interval, input.Name, elapsed) if outerr != nil { return outerr @@ -165,7 +165,7 @@ func (a *Agent) gatherSeparate( } } -// Test verifies that we can 'Gather' from all plugins with their configured +// Test verifies that we can 'Gather' from all inputs with their configured // Config struct func (a *Agent) Test() error { shutdown := make(chan struct{}) @@ -184,27 +184,27 @@ func (a *Agent) Test() error { } }() - for _, plugin := range a.Config.Plugins { - acc := NewAccumulator(plugin.Config, pointChan) + for _, input := range a.Config.Inputs { + acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(true) - // acc.SetPrefix(plugin.Name + "_") + // acc.SetPrefix(input.Name + "_") - fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name) - if plugin.Config.Interval != 0 { - fmt.Printf("* Internal: %s\n", plugin.Config.Interval) + fmt.Printf("* Plugin: %s, Collection 1\n", input.Name) + if input.Config.Interval != 0 { + fmt.Printf("* Internal: %s\n", input.Config.Interval) } - if err := plugin.Plugin.Gather(acc); err != nil { + if err := 
input.Input.Gather(acc); err != nil { return err } - // Special instructions for some plugins. cpu, for example, needs to be + // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. - switch plugin.Name { + switch input.Name { case "cpu", "mongodb": time.Sleep(500 * time.Millisecond) - fmt.Printf("* Plugin: %s, Collection 2\n", plugin.Name) - if err := plugin.Plugin.Gather(acc); err != nil { + fmt.Printf("* Plugin: %s, Collection 2\n", input.Name) + if err := input.Input.Gather(acc); err != nil { return err } } @@ -332,10 +332,10 @@ func (a *Agent) Run(shutdown chan struct{}) error { log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+ "Flush Interval:%s\n", - a.Config.Agent.Interval, a.Config.Agent.Debug, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval) + a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, + a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - // channel shared between all plugin threads for accumulating points + // channel shared between all input threads for accumulating points pointChan := make(chan *client.Point, 1000) // Round collection to nearest interval by sleeping @@ -354,29 +354,29 @@ func (a *Agent) Run(shutdown chan struct{}) error { } }() - for _, plugin := range a.Config.Plugins { + for _, input := range a.Config.Inputs { // Start service of any ServicePlugins - switch p := plugin.Plugin.(type) { - case plugins.ServicePlugin: + switch p := input.Input.(type) { + case inputs.ServiceInput: if err := p.Start(); err != nil { - log.Printf("Service for plugin %s failed to start, exiting\n%s\n", - plugin.Name, err.Error()) + log.Printf("Service for input %s failed to start, exiting\n%s\n", + input.Name, err.Error()) return err } defer p.Stop() } - // Special handling for plugins that have their own collection interval + // Special handling for inputs that have their own collection interval // configured. 
Default intervals are handled below with gatherParallel - if plugin.Config.Interval != 0 { + if input.Config.Interval != 0 { wg.Add(1) - go func(plugin *config.RunningPlugin) { + go func(input *config.RunningInput) { defer wg.Done() - if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil { + if err := a.gatherSeparate(shutdown, input, pointChan); err != nil { log.Printf(err.Error()) } - }(plugin) + }(input) } } diff --git a/agent_test.go b/agent_test.go index c08fbe292..1cb020c7b 100644 --- a/agent_test.go +++ b/agent_test.go @@ -8,46 +8,46 @@ import ( "github.com/influxdb/telegraf/internal/config" // needing to load the plugins - _ "github.com/influxdb/telegraf/plugins/all" + _ "github.com/influxdb/telegraf/plugins/inputs/all" // needing to load the outputs - _ "github.com/influxdb/telegraf/outputs/all" + _ "github.com/influxdb/telegraf/plugins/outputs/all" ) func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() - c.PluginFilters = []string{"mysql"} + c.InputFilters = []string{"mysql"} err := c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ := NewAgent(c) - assert.Equal(t, 1, len(a.Config.Plugins)) + assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"foo"} + c.InputFilters = []string{"foo"} err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 0, len(a.Config.Plugins)) + assert.Equal(t, 0, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "foo"} + c.InputFilters = []string{"mysql", "foo"} err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 1, len(a.Config.Plugins)) + assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "redis"} + c.InputFilters = []string{"mysql", "redis"} err = 
c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Plugins)) + assert.Equal(t, 2, len(a.Config.Inputs)) c = config.NewConfig() - c.PluginFilters = []string{"mysql", "foo", "redis", "bar"} + c.InputFilters = []string{"mysql", "foo", "redis", "bar"} err = c.LoadConfig("./internal/config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Plugins)) + assert.Equal(t, 2, len(a.Config.Inputs)) } func TestAgent_LoadOutput(t *testing.T) { diff --git a/circle.yml b/circle.yml index 2d006d70e..d86d46dba 100644 --- a/circle.yml +++ b/circle.yml @@ -10,8 +10,6 @@ machine: - go version dependencies: - cache_directories: - - "~/telegraf-build/src" override: - docker info diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 8f9a628f9..cc54dbc88 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -10,23 +10,23 @@ import ( "github.com/influxdb/telegraf" "github.com/influxdb/telegraf/internal/config" - _ "github.com/influxdb/telegraf/outputs/all" - _ "github.com/influxdb/telegraf/plugins/all" + _ "github.com/influxdb/telegraf/plugins/inputs/all" + _ "github.com/influxdb/telegraf/plugins/outputs/all" ) var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fConfig = flag.String("config", "", "configuration file to load") -var fConfigDirectory = flag.String("configdirectory", "", +var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") var fVersion = flag.Bool("version", false, "display the version") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") -var fPLuginFilters = flag.String("filter", "", +var fInputFilters = 
flag.String("input-filter", "", "filter the plugins to enable, separator is :") -var fOutputFilters = flag.String("outputfilter", "", +var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") @@ -35,13 +35,53 @@ var fUsage = flag.String("usage", "", // -ldflags "-X main.Version=`git describe --always --tags`" var Version string +const usage = `Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB + +Usage: + + telegraf + +The flags are: + + -config configuration file to load + -test gather metrics once, print them to stdout, and exit + -sample-config print out full sample configuration to stdout + -config-directory directory containing additional *.conf files + -input-filter filter the input plugins to enable, separator is : + -output-filter filter the output plugins to enable, separator is : + -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -version print the version to stdout + +Examples: + + # generate a telegraf config file: + telegraf -sample-config > telegraf.conf + + # generate a telegraf config file with only cpu input and influxdb output enabled + telegraf -sample-config -input-filter cpu -output-filter influxdb + + # run a single telegraf collection, outputting metrics to stdout + telegraf -config telegraf.conf -test + + # run telegraf with all plugins defined in config file + telegraf -config telegraf.conf + + # run telegraf, enabling only the cpu and memory inputs and influxdb output + telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb +` + func main() { + flag.Usage = usageExit flag.Parse() - var pluginFilters []string - if *fPLuginFilters != "" { - pluginsFilter := strings.TrimSpace(*fPLuginFilters) - pluginFilters = strings.Split(":"+pluginsFilter+":", ":") + if flag.NFlag() == 0 { + usageExit() + } + + var inputFilters []string + if *fInputFilters 
!= "" { + inputFilter := strings.TrimSpace(*fInputFilters) + inputFilters = strings.Split(":"+inputFilter+":", ":") } var outputFilters []string @@ -57,12 +97,12 @@ func main() { } if *fSampleConfig { - config.PrintSampleConfig(pluginFilters, outputFilters) + config.PrintSampleConfig(inputFilters, outputFilters) return } if *fUsage != "" { - if err := config.PrintPluginConfig(*fUsage); err != nil { + if err := config.PrintInputConfig(*fUsage); err != nil { if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { log.Fatalf("%s and %s", err, err2) } @@ -78,7 +118,7 @@ func main() { if *fConfig != "" { c = config.NewConfig() c.OutputFilters = outputFilters - c.PluginFilters = pluginFilters + c.InputFilters = inputFilters err = c.LoadConfig(*fConfig) if err != nil { log.Fatal(err) @@ -98,7 +138,7 @@ func main() { if len(c.Outputs) == 0 { log.Fatalf("Error: no outputs found, did you provide a valid config file?") } - if len(c.Plugins) == 0 { + if len(c.Inputs) == 0 { log.Fatalf("Error: no plugins found, did you provide a valid config file?") } @@ -134,7 +174,7 @@ func main() { log.Printf("Starting Telegraf (version %s)\n", Version) log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded plugins: %s", strings.Join(c.PluginNames(), " ")) + log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " ")) log.Printf("Tags enabled: %s", c.ListTags()) if *fPidfile != "" { @@ -150,3 +190,8 @@ func main() { ag.Run(shutdown) } + +func usageExit() { + fmt.Println(usage) + os.Exit(0) +} diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 9460cef25..eb50005a5 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1,7 +1,7 @@ # Telegraf configuration # Telegraf is entirely plugin driven. All metrics are gathered from the -# declared plugins. +# declared inputs. # Even if a plugin has no configuration, it must be declared in here # to be active. 
Declaring a plugin means just specifying the name @@ -76,13 +76,13 @@ ############################################################################### -# PLUGINS # +# INPUTS # ############################################################################### -[plugins] +[inputs] # Read metrics about cpu usage -[[plugins.cpu]] +[[inputs.cpu]] # Whether to report per-cpu stats or not percpu = true # Whether to report total system cpu stats or not @@ -91,13 +91,13 @@ drop = ["cpu_time"] # Read metrics about disk usage by mount point -[[plugins.disk]] +[[inputs.disk]] # By default, telegraf gather stats for all mountpoints. # Setting mountpoints will restrict the stats to the specified mountpoints. # Mountpoints=["/"] # Read metrics about disk IO by device -[[plugins.diskio]] +[[inputs.diskio]] # By default, telegraf will gather stats for all devices including # disk partitions. # Setting devices will restrict the stats to the specified devices. @@ -106,18 +106,18 @@ # SkipSerialNumber = true # Read metrics about memory usage -[[plugins.mem]] +[[inputs.mem]] # no configuration # Read metrics about swap memory usage -[[plugins.swap]] +[[inputs.swap]] # no configuration # Read metrics about system load & uptime -[[plugins.system]] +[[inputs.system]] # no configuration ############################################################################### -# SERVICE PLUGINS # +# SERVICE INPUTS # ############################################################################### diff --git a/internal/config/config.go b/internal/config/config.go index 9275fa177..e96856015 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -11,8 +11,8 @@ import ( "time" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdb/telegraf/plugins/outputs" "github.com/naoina/toml" "github.com/naoina/toml/ast" @@ -25,11 +25,11 @@ import ( // specified type 
Config struct { Tags map[string]string - PluginFilters []string + InputFilters []string OutputFilters []string Agent *AgentConfig - Plugins []*RunningPlugin + Inputs []*RunningInput Outputs []*RunningOutput } @@ -45,9 +45,9 @@ func NewConfig() *Config { }, Tags: make(map[string]string), - Plugins: make([]*RunningPlugin, 0), + Inputs: make([]*RunningInput, 0), Outputs: make([]*RunningOutput, 0), - PluginFilters: make([]string, 0), + InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } return c @@ -93,10 +93,10 @@ type RunningOutput struct { Config *OutputConfig } -type RunningPlugin struct { +type RunningInput struct { Name string - Plugin plugins.Plugin - Config *PluginConfig + Input inputs.Input + Config *InputConfig } // Filter containing drop/pass and tagdrop/tagpass rules @@ -110,8 +110,8 @@ type Filter struct { IsActive bool } -// PluginConfig containing a name, interval, and filter -type PluginConfig struct { +// InputConfig containing a name, interval, and filter +type InputConfig struct { Name string NameOverride string MeasurementPrefix string @@ -204,16 +204,16 @@ func (f Filter) ShouldTagsPass(tags map[string]string) bool { return true } -// Plugins returns a list of strings of the configured plugins. -func (c *Config) PluginNames() []string { +// Inputs returns a list of strings of the configured inputs. +func (c *Config) InputNames() []string { var name []string - for _, plugin := range c.Plugins { - name = append(name, plugin.Name) + for _, input := range c.Inputs { + name = append(name, input.Name) } return name } -// Outputs returns a list of strings of the configured plugins. +// Outputs returns a list of strings of the configured inputs. func (c *Config) OutputNames() []string { var name []string for _, output := range c.Outputs { @@ -239,7 +239,7 @@ func (c *Config) ListTags() string { var header = `# Telegraf configuration # Telegraf is entirely plugin driven. All metrics are gathered from the -# declared plugins. 
+# declared inputs. # Even if a plugin has no configuration, it must be declared in here # to be active. Declaring a plugin means just specifying the name @@ -263,7 +263,7 @@ var header = `# Telegraf configuration # Configuration for telegraf agent [agent] - # Default data collection interval for all plugins + # Default data collection interval for all inputs interval = "10s" # Rounds collection interval to 'interval' # ie, if interval="10s" then always collect on :00, :10, :20, etc. @@ -293,16 +293,16 @@ var header = `# Telegraf configuration var pluginHeader = ` ############################################################################### -# PLUGINS # +# INPUTS # ############################################################################### -[plugins] +[inputs] ` -var servicePluginHeader = ` +var serviceInputHeader = ` ############################################################################### -# SERVICE PLUGINS # +# SERVICE INPUTS # ############################################################################### ` @@ -326,35 +326,35 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) { printConfig(oname, output, "outputs") } - // Filter plugins + // Filter inputs var pnames []string - for pname := range plugins.Plugins { + for pname := range inputs.Inputs { if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) - // Print Plugins + // Print Inputs fmt.Printf(pluginHeader) - servPlugins := make(map[string]plugins.ServicePlugin) + servInputs := make(map[string]inputs.ServiceInput) for _, pname := range pnames { - creator := plugins.Plugins[pname] - plugin := creator() + creator := inputs.Inputs[pname] + input := creator() - switch p := plugin.(type) { - case plugins.ServicePlugin: - servPlugins[pname] = p + switch p := input.(type) { + case inputs.ServiceInput: + servInputs[pname] = p continue } - printConfig(pname, plugin, "plugins") + printConfig(pname, input, 
"inputs") } - // Print Service Plugins - fmt.Printf(servicePluginHeader) - for name, plugin := range servPlugins { - printConfig(name, plugin, "plugins") + // Print Service Inputs + fmt.Printf(serviceInputHeader) + for name, input := range servInputs { + printConfig(name, input, "inputs") } } @@ -382,12 +382,12 @@ func sliceContains(name string, list []string) bool { return false } -// PrintPluginConfig prints the config usage of a single plugin. -func PrintPluginConfig(name string) error { - if creator, ok := plugins.Plugins[name]; ok { - printConfig(name, creator(), "plugins") +// PrintInputConfig prints the config usage of a single input. +func PrintInputConfig(name string) error { + if creator, ok := inputs.Inputs[name]; ok { + printConfig(name, creator(), "inputs") } else { - return errors.New(fmt.Sprintf("Plugin %s not found", name)) + return errors.New(fmt.Sprintf("Input %s not found", name)) } return nil } @@ -453,33 +453,15 @@ func (c *Config) LoadConfig(path string) error { return err } case "outputs": - for outputName, outputVal := range subTable.Fields { - switch outputSubTable := outputVal.(type) { - case *ast.Table: - if err = c.addOutput(outputName, outputSubTable); err != nil { - return err - } - case []*ast.Table: - for _, t := range outputSubTable { - if err = c.addOutput(outputName, t); err != nil { - return err - } - } - default: - return fmt.Errorf("Unsupported config format: %s", - outputName) - } - } - case "plugins": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case *ast.Table: - if err = c.addPlugin(pluginName, pluginSubTable); err != nil { + if err = c.addOutput(pluginName, pluginSubTable); err != nil { return err } case []*ast.Table: for _, t := range pluginSubTable { - if err = c.addPlugin(pluginName, t); err != nil { + if err = c.addOutput(pluginName, t); err != nil { return err } } @@ -488,10 +470,28 @@ func (c *Config) LoadConfig(path string) error { pluginName) } } - // Assume it's 
a plugin for legacy config file support if no other + case "inputs": + for pluginName, pluginVal := range subTable.Fields { + switch pluginSubTable := pluginVal.(type) { + case *ast.Table: + if err = c.addInput(pluginName, pluginSubTable); err != nil { + return err + } + case []*ast.Table: + for _, t := range pluginSubTable { + if err = c.addInput(pluginName, t); err != nil { + return err + } + } + default: + return fmt.Errorf("Unsupported config format: %s", + pluginName) + } + } + // Assume it's an input for legacy config file support if no other // identifiers are present default: - if err = c.addPlugin(name, subTable); err != nil { + if err = c.addInput(name, subTable); err != nil { return err } } @@ -527,41 +527,41 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return nil } -func (c *Config) addPlugin(name string, table *ast.Table) error { - if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) { +func (c *Config) addInput(name string, table *ast.Table) error { + if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) { return nil } - // Legacy support renaming io plugin to diskio + // Legacy support renaming io input to diskio if name == "io" { name = "diskio" } - creator, ok := plugins.Plugins[name] + creator, ok := inputs.Inputs[name] if !ok { - return fmt.Errorf("Undefined but requested plugin: %s", name) + return fmt.Errorf("Undefined but requested input: %s", name) } - plugin := creator() + input := creator() - pluginConfig, err := buildPlugin(name, table) + pluginConfig, err := buildInput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, plugin); err != nil { + if err := toml.UnmarshalTable(table, input); err != nil { return err } - rp := &RunningPlugin{ + rp := &RunningInput{ Name: name, - Plugin: plugin, + Input: input, Config: pluginConfig, } - c.Plugins = append(c.Plugins, rp) + c.Inputs = append(c.Inputs, rp) return nil } // buildFilter builds a Filter 
(tagpass/tagdrop/pass/drop) to -// be inserted into the OutputConfig/PluginConfig to be used for prefix +// be inserted into the OutputConfig/InputConfig to be used for prefix // filtering on tags and measurements func buildFilter(tbl *ast.Table) Filter { f := Filter{} @@ -637,11 +637,11 @@ func buildFilter(tbl *ast.Table) Filter { return f } -// buildPlugin parses plugin specific items from the ast.Table, +// buildInput parses input specific items from the ast.Table, // builds the filter and returns a -// PluginConfig to be inserted into RunningPlugin -func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) { - cp := &PluginConfig{Name: name} +// InputConfig to be inserted into RunningInput +func buildInput(name string, tbl *ast.Table) (*InputConfig, error) { + cp := &InputConfig{Name: name} if node, ok := tbl.Fields["interval"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -683,7 +683,7 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) { if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil { - log.Printf("Could not parse tags for plugin %s\n", name) + log.Printf("Could not parse tags for input %s\n", name) } } } @@ -698,7 +698,7 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) { } // buildOutput parses output specific items from the ast.Table, builds the filter and returns an -// OutputConfig to be inserted into RunningPlugin +// OutputConfig to be inserted into RunningInput // Note: error exists in the return for future calls that might require error func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) { oc := &OutputConfig{ diff --git a/internal/config/config_test.go b/internal/config/config_test.go index eeb1f7fae..c8ed79bdf 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -4,21 +4,21 @@ import ( "testing" "time" - 
"github.com/influxdb/telegraf/plugins" - "github.com/influxdb/telegraf/plugins/exec" - "github.com/influxdb/telegraf/plugins/memcached" - "github.com/influxdb/telegraf/plugins/procstat" + "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdb/telegraf/plugins/inputs/exec" + "github.com/influxdb/telegraf/plugins/inputs/memcached" + "github.com/influxdb/telegraf/plugins/inputs/procstat" "github.com/stretchr/testify/assert" ) -func TestConfig_LoadSinglePlugin(t *testing.T) { +func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") - memcached := plugins.Plugins["memcached"]().(*memcached.Memcached) + memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"localhost"} - mConfig := &PluginConfig{ + mConfig := &InputConfig{ Name: "memcached", Filter: Filter{ Drop: []string{"other", "stuff"}, @@ -41,9 +41,9 @@ func TestConfig_LoadSinglePlugin(t *testing.T) { } mConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Plugins[0].Plugin, + assert.Equal(t, memcached, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Plugins[0].Config, + assert.Equal(t, mConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") } @@ -58,10 +58,10 @@ func TestConfig_LoadDirectory(t *testing.T) { t.Error(err) } - memcached := plugins.Plugins["memcached"]().(*memcached.Memcached) + memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) memcached.Servers = []string{"localhost"} - mConfig := &PluginConfig{ + mConfig := &InputConfig{ Name: "memcached", Filter: Filter{ Drop: []string{"other", "stuff"}, @@ -84,36 +84,38 @@ func TestConfig_LoadDirectory(t *testing.T) { } mConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Plugins[0].Plugin, + assert.Equal(t, memcached, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, 
c.Plugins[0].Config, + assert.Equal(t, mConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") - ex := plugins.Plugins["exec"]().(*exec.Exec) + ex := inputs.Inputs["exec"]().(*exec.Exec) ex.Command = "/usr/bin/myothercollector --foo=bar" - ex.Name = "myothercollector" - eConfig := &PluginConfig{Name: "exec"} + eConfig := &InputConfig{ + Name: "exec", + MeasurementSuffix: "_myothercollector", + } eConfig.Tags = make(map[string]string) - assert.Equal(t, ex, c.Plugins[1].Plugin, + assert.Equal(t, ex, c.Inputs[1].Input, "Merged Testdata did not produce a correct exec struct.") - assert.Equal(t, eConfig, c.Plugins[1].Config, + assert.Equal(t, eConfig, c.Inputs[1].Config, "Merged Testdata did not produce correct exec metadata.") memcached.Servers = []string{"192.168.1.1"} - assert.Equal(t, memcached, c.Plugins[2].Plugin, + assert.Equal(t, memcached, c.Inputs[2].Input, "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Plugins[2].Config, + assert.Equal(t, mConfig, c.Inputs[2].Config, "Testdata did not produce correct memcached metadata.") - pstat := plugins.Plugins["procstat"]().(*procstat.Procstat) + pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) pstat.PidFile = "/var/run/grafana-server.pid" - pConfig := &PluginConfig{Name: "procstat"} + pConfig := &InputConfig{Name: "procstat"} pConfig.Tags = make(map[string]string) - assert.Equal(t, pstat, c.Plugins[3].Plugin, + assert.Equal(t, pstat, c.Inputs[3].Input, "Merged Testdata did not produce a correct procstat struct.") - assert.Equal(t, pConfig, c.Plugins[3].Config, + assert.Equal(t, pConfig, c.Inputs[3].Config, "Merged Testdata did not produce correct procstat metadata.") } diff --git a/internal/config/testdata/single_plugin.toml b/internal/config/testdata/single_plugin.toml index e591984f1..6670f6b2f 100644 --- a/internal/config/testdata/single_plugin.toml +++ b/internal/config/testdata/single_plugin.toml @@ -1,9 +1,9 @@ -[[plugins.memcached]] 
+[[inputs.memcached]] servers = ["localhost"] pass = ["some", "strings"] drop = ["other", "stuff"] interval = "5s" - [plugins.memcached.tagpass] + [inputs.memcached.tagpass] goodtag = ["mytag"] - [plugins.memcached.tagdrop] + [inputs.memcached.tagdrop] badtag = ["othertag"] diff --git a/internal/config/testdata/subconfig/exec.conf b/internal/config/testdata/subconfig/exec.conf index 80aca00e8..d621e78e0 100644 --- a/internal/config/testdata/subconfig/exec.conf +++ b/internal/config/testdata/subconfig/exec.conf @@ -1,6 +1,4 @@ -[[plugins.exec]] +[[inputs.exec]] # the command to run command = "/usr/bin/myothercollector --foo=bar" - - # name of the command (used as a prefix for measurements) - name = "myothercollector" + name_suffix = "_myothercollector" diff --git a/internal/config/testdata/subconfig/memcached.conf b/internal/config/testdata/subconfig/memcached.conf index 8d67886c1..4c43febc7 100644 --- a/internal/config/testdata/subconfig/memcached.conf +++ b/internal/config/testdata/subconfig/memcached.conf @@ -1,9 +1,9 @@ -[[plugins.memcached]] +[[inputs.memcached]] servers = ["192.168.1.1"] pass = ["some", "strings"] drop = ["other", "stuff"] interval = "5s" - [plugins.memcached.tagpass] + [inputs.memcached.tagpass] goodtag = ["mytag"] - [plugins.memcached.tagdrop] + [inputs.memcached.tagdrop] badtag = ["othertag"] diff --git a/internal/config/testdata/subconfig/procstat.conf b/internal/config/testdata/subconfig/procstat.conf index 9f23e8d58..82708667f 100644 --- a/internal/config/testdata/subconfig/procstat.conf +++ b/internal/config/testdata/subconfig/procstat.conf @@ -1,2 +1,2 @@ -[[plugins.procstat]] +[[inputs.procstat]] pid_file = "/var/run/grafana-server.pid" diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index 7b8cdb2bb..3c279db34 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -1,7 +1,7 @@ # Telegraf configuration # Telegraf is 
entirely plugin driven. All metrics are gathered from the -# declared plugins. +# declared inputs. # Even if a plugin has no configuration, it must be declared in here # to be active. Declaring a plugin means just specifying the name @@ -70,15 +70,15 @@ # PLUGINS # ############################################################################### -[plugins] +[inputs] # Read Apache status information (mod_status) -[[plugins.apache]] +[[inputs.apache]] # An array of Apache status URI to gather stats. urls = ["http://localhost/server-status?auto"] # Read metrics about cpu usage -[[plugins.cpu]] +[[inputs.cpu]] # Whether to report per-cpu stats or not percpu = true # Whether to report total system cpu stats or not @@ -87,11 +87,11 @@ drop = ["cpu_time"] # Read metrics about disk usage by mount point -[[plugins.diskio]] +[[inputs.diskio]] # no configuration # Read metrics from one or many disque servers -[[plugins.disque]] +[[inputs.disque]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, # 10.0.0.1:10000, etc. @@ -100,7 +100,7 @@ servers = ["localhost"] # Read stats from one or more Elasticsearch servers or clusters -[[plugins.elasticsearch]] +[[inputs.elasticsearch]] # specify a list of one or more Elasticsearch servers servers = ["http://localhost:9200"] @@ -109,15 +109,13 @@ local = true # Read flattened metrics from one or more commands that output JSON to stdout -[[plugins.exec]] +[[inputs.exec]] # the command to run command = "/usr/bin/mycollector --foo=bar" - - # name of the command (used as a prefix for measurements) - name = "mycollector" + name_suffix = "_mycollector" # Read metrics of haproxy, via socket or csv stats page -[[plugins.haproxy]] +[[inputs.haproxy]] # An array of address to gather stats about. Specify an ip on hostname # with optional port. ie localhost, 10.10.3.33:1936, etc. 
# @@ -127,7 +125,7 @@ # servers = ["socket:/run/haproxy/admin.sock"] # Read flattened metrics from one or more JSON HTTP endpoints -[[plugins.httpjson]] +[[inputs.httpjson]] # a name for the service being polled name = "webserver_stats" @@ -146,11 +144,11 @@ threshold = "0.75" # Read metrics about disk IO by device -[[plugins.diskio]] +[[inputs.diskio]] # no configuration # read metrics from a Kafka topic -[[plugins.kafka_consumer]] +[[inputs.kafka_consumer]] # topic(s) to consume topics = ["telegraf"] # an array of Zookeeper connection strings @@ -163,7 +161,7 @@ offset = "oldest" # Read metrics from a LeoFS Server via SNMP -[[plugins.leofs]] +[[inputs.leofs]] # An array of URI to gather stats about LeoFS. # Specify an ip or hostname with port. ie 127.0.0.1:4020 # @@ -171,7 +169,7 @@ servers = ["127.0.0.1:4021"] # Read metrics from local Lustre service on OST, MDS -[[plugins.lustre2]] +[[inputs.lustre2]] # An array of /proc globs to search for Lustre stats # If not specified, the default will work on Lustre 2.5.x # @@ -179,11 +177,11 @@ # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] # Read metrics about memory usage -[[plugins.mem]] +[[inputs.mem]] # no configuration # Read metrics from one or many memcached servers -[[plugins.memcached]] +[[inputs.memcached]] # An array of address to gather stats about. Specify an ip on hostname # with optional port. ie localhost, 10.0.0.1:11211, etc. # @@ -191,7 +189,7 @@ servers = ["localhost"] # Read metrics from one or many MongoDB servers -[[plugins.mongodb]] +[[inputs.mongodb]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. 
@@ -200,7 +198,7 @@ servers = ["127.0.0.1:27017"] # Read metrics from one or many mysql servers -[[plugins.mysql]] +[[inputs.mysql]] # specify servers via a url matching: # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] # e.g. @@ -211,7 +209,7 @@ servers = ["localhost"] # Read metrics about network interface usage -[[plugins.net]] +[[inputs.net]] # By default, telegraf gathers stats from any up interface (excluding loopback) # Setting interfaces will tell it to gather these explicit interfaces, # regardless of status. @@ -219,12 +217,12 @@ # interfaces = ["eth0", ... ] # Read Nginx's basic status information (ngx_http_stub_status_module) -[[plugins.nginx]] +[[inputs.nginx]] # An array of Nginx stub_status URI to gather stats. urls = ["http://localhost/status"] # Ping given url(s) and return statistics -[[plugins.ping]] +[[inputs.ping]] # urls to ping urls = ["www.google.com"] # required # number of pings to send (ping -c ) @@ -237,7 +235,7 @@ interface = "" # Read metrics from one or many postgresql servers -[[plugins.postgresql]] +[[inputs.postgresql]] # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: @@ -264,12 +262,12 @@ # address = "influx@remoteserver" # Read metrics from one or many prometheus clients -[[plugins.prometheus]] +[[inputs.prometheus]] # An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] # Read metrics from one or many RabbitMQ servers via the management API -[[plugins.rabbitmq]] +[[inputs.rabbitmq]] # Specify servers via an array of tables # name = "rmq-server-1" # optional tag # url = "http://localhost:15672" @@ -281,7 +279,7 @@ # nodes = ["rabbit@node1", "rabbit@node2"] # Read metrics from one or many redis servers -[[plugins.redis]] +[[inputs.redis]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. 
ie redis://localhost, redis://10.10.3.33:18832, # 10.0.0.1:10000, etc. @@ -290,7 +288,7 @@ servers = ["localhost"] # Read metrics from one or many RethinkDB servers -[[plugins.rethinkdb]] +[[inputs.rethinkdb]] # An array of URI to gather stats about. Specify an ip or hostname # with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105, # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc. @@ -299,9 +297,9 @@ servers = ["127.0.0.1:28015"] # Read metrics about swap memory usage -[[plugins.swap]] +[[inputs.swap]] # no configuration # Read metrics about system load & uptime -[[plugins.system]] +[[inputs.system]] # no configuration diff --git a/outputs/all/all.go b/outputs/all/all.go deleted file mode 100644 index 08ebf2549..000000000 --- a/outputs/all/all.go +++ /dev/null @@ -1,16 +0,0 @@ -package all - -import ( - _ "github.com/influxdb/telegraf/outputs/amon" - _ "github.com/influxdb/telegraf/outputs/amqp" - _ "github.com/influxdb/telegraf/outputs/datadog" - _ "github.com/influxdb/telegraf/outputs/influxdb" - _ "github.com/influxdb/telegraf/outputs/kafka" - _ "github.com/influxdb/telegraf/outputs/kinesis" - _ "github.com/influxdb/telegraf/outputs/librato" - _ "github.com/influxdb/telegraf/outputs/mqtt" - _ "github.com/influxdb/telegraf/outputs/nsq" - _ "github.com/influxdb/telegraf/outputs/opentsdb" - _ "github.com/influxdb/telegraf/outputs/prometheus_client" - _ "github.com/influxdb/telegraf/outputs/riemann" -) diff --git a/plugins/all/all.go b/plugins/all/all.go deleted file mode 100644 index 8b4e754be..000000000 --- a/plugins/all/all.go +++ /dev/null @@ -1,37 +0,0 @@ -package all - -import ( - _ "github.com/influxdb/telegraf/plugins/aerospike" - _ "github.com/influxdb/telegraf/plugins/apache" - _ "github.com/influxdb/telegraf/plugins/bcache" - _ "github.com/influxdb/telegraf/plugins/disque" - _ "github.com/influxdb/telegraf/plugins/elasticsearch" - _ "github.com/influxdb/telegraf/plugins/exec" - _ "github.com/influxdb/telegraf/plugins/haproxy" - 
_ "github.com/influxdb/telegraf/plugins/httpjson" - _ "github.com/influxdb/telegraf/plugins/influxdb" - _ "github.com/influxdb/telegraf/plugins/jolokia" - _ "github.com/influxdb/telegraf/plugins/kafka_consumer" - _ "github.com/influxdb/telegraf/plugins/leofs" - _ "github.com/influxdb/telegraf/plugins/lustre2" - _ "github.com/influxdb/telegraf/plugins/mailchimp" - _ "github.com/influxdb/telegraf/plugins/memcached" - _ "github.com/influxdb/telegraf/plugins/mongodb" - _ "github.com/influxdb/telegraf/plugins/mysql" - _ "github.com/influxdb/telegraf/plugins/nginx" - _ "github.com/influxdb/telegraf/plugins/phpfpm" - _ "github.com/influxdb/telegraf/plugins/ping" - _ "github.com/influxdb/telegraf/plugins/postgresql" - _ "github.com/influxdb/telegraf/plugins/procstat" - _ "github.com/influxdb/telegraf/plugins/prometheus" - _ "github.com/influxdb/telegraf/plugins/puppetagent" - _ "github.com/influxdb/telegraf/plugins/rabbitmq" - _ "github.com/influxdb/telegraf/plugins/redis" - _ "github.com/influxdb/telegraf/plugins/rethinkdb" - _ "github.com/influxdb/telegraf/plugins/statsd" - _ "github.com/influxdb/telegraf/plugins/system" - _ "github.com/influxdb/telegraf/plugins/trig" - _ "github.com/influxdb/telegraf/plugins/twemproxy" - _ "github.com/influxdb/telegraf/plugins/zfs" - _ "github.com/influxdb/telegraf/plugins/zookeeper" -) diff --git a/plugins/aerospike/README.md b/plugins/inputs/aerospike/README.md similarity index 100% rename from plugins/aerospike/README.md rename to plugins/inputs/aerospike/README.md diff --git a/plugins/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go similarity index 96% rename from plugins/aerospike/aerospike.go rename to plugins/inputs/aerospike/aerospike.go index 9d920646d..5f847ebfa 100644 --- a/plugins/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "net" "strconv" 
"strings" @@ -119,7 +119,7 @@ func (a *Aerospike) Description() string { return "Read stats from an aerospike server" } -func (a *Aerospike) Gather(acc plugins.Accumulator) error { +func (a *Aerospike) Gather(acc inputs.Accumulator) error { if len(a.Servers) == 0 { return a.gatherServer("127.0.0.1:3000", acc) } @@ -140,7 +140,7 @@ func (a *Aerospike) Gather(acc plugins.Accumulator) error { return outerr } -func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error { +func (a *Aerospike) gatherServer(host string, acc inputs.Accumulator) error { aerospikeInfo, err := getMap(STATISTICS_COMMAND, host) if err != nil { return fmt.Errorf("Aerospike info failed: %s", err) @@ -249,7 +249,7 @@ func get(key []byte, host string) (map[string]string, error) { func readAerospikeStats( stats map[string]string, - acc plugins.Accumulator, + acc inputs.Accumulator, host string, namespace string, ) { @@ -336,7 +336,7 @@ func msgLenFromBytes(buf [6]byte) int64 { } func init() { - plugins.Add("aerospike", func() plugins.Plugin { + inputs.Add("aerospike", func() inputs.Input { return &Aerospike{} }) } diff --git a/plugins/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go similarity index 100% rename from plugins/aerospike/aerospike_test.go rename to plugins/inputs/aerospike/aerospike_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go new file mode 100644 index 000000000..fea1c7ca0 --- /dev/null +++ b/plugins/inputs/all/all.go @@ -0,0 +1,37 @@ +package all + +import ( + _ "github.com/influxdb/telegraf/plugins/inputs/aerospike" + _ "github.com/influxdb/telegraf/plugins/inputs/apache" + _ "github.com/influxdb/telegraf/plugins/inputs/bcache" + _ "github.com/influxdb/telegraf/plugins/inputs/disque" + _ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdb/telegraf/plugins/inputs/exec" + _ "github.com/influxdb/telegraf/plugins/inputs/haproxy" + _ 
"github.com/influxdb/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdb/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdb/telegraf/plugins/inputs/jolokia" + _ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer" + _ "github.com/influxdb/telegraf/plugins/inputs/leofs" + _ "github.com/influxdb/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdb/telegraf/plugins/inputs/mailchimp" + _ "github.com/influxdb/telegraf/plugins/inputs/memcached" + _ "github.com/influxdb/telegraf/plugins/inputs/mongodb" + _ "github.com/influxdb/telegraf/plugins/inputs/mysql" + _ "github.com/influxdb/telegraf/plugins/inputs/nginx" + _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm" + _ "github.com/influxdb/telegraf/plugins/inputs/ping" + _ "github.com/influxdb/telegraf/plugins/inputs/postgresql" + _ "github.com/influxdb/telegraf/plugins/inputs/procstat" + _ "github.com/influxdb/telegraf/plugins/inputs/prometheus" + _ "github.com/influxdb/telegraf/plugins/inputs/puppetagent" + _ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq" + _ "github.com/influxdb/telegraf/plugins/inputs/redis" + _ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb" + _ "github.com/influxdb/telegraf/plugins/inputs/statsd" + _ "github.com/influxdb/telegraf/plugins/inputs/system" + _ "github.com/influxdb/telegraf/plugins/inputs/trig" + _ "github.com/influxdb/telegraf/plugins/inputs/twemproxy" + _ "github.com/influxdb/telegraf/plugins/inputs/zfs" + _ "github.com/influxdb/telegraf/plugins/inputs/zookeeper" +) diff --git a/plugins/apache/README.md b/plugins/inputs/apache/README.md similarity index 100% rename from plugins/apache/README.md rename to plugins/inputs/apache/README.md diff --git a/plugins/apache/apache.go b/plugins/inputs/apache/apache.go similarity index 93% rename from plugins/apache/apache.go rename to plugins/inputs/apache/apache.go index 958a0296e..f48bac336 100644 --- a/plugins/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -11,7 +11,7 @@ import ( "sync" 
"time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Apache struct { @@ -31,7 +31,7 @@ func (n *Apache) Description() string { return "Read Apache status information (mod_status)" } -func (n *Apache) Gather(acc plugins.Accumulator) error { +func (n *Apache) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -59,7 +59,7 @@ var tr = &http.Transport{ var client = &http.Client{Transport: tr} -func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { +func (n *Apache) gatherUrl(addr *url.URL, acc inputs.Accumulator) error { resp, err := client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -164,7 +164,7 @@ func getTags(addr *url.URL) map[string]string { } func init() { - plugins.Add("apache", func() plugins.Plugin { + inputs.Add("apache", func() inputs.Input { return &Apache{} }) } diff --git a/plugins/apache/apache_test.go b/plugins/inputs/apache/apache_test.go similarity index 100% rename from plugins/apache/apache_test.go rename to plugins/inputs/apache/apache_test.go diff --git a/plugins/bcache/README.md b/plugins/inputs/bcache/README.md similarity index 97% rename from plugins/bcache/README.md rename to plugins/inputs/bcache/README.md index 27062b915..98a841bf5 100644 --- a/plugins/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -26,27 +26,27 @@ Measurement names: dirty_data Amount of dirty data for this backing device in the cache. Continuously updated unlike the cache set's version, but may be slightly off. - + bypassed Amount of IO (both reads and writes) that has bypassed the cache - + cache_bypass_hits cache_bypass_misses Hits and misses for IO that is intended to skip the cache are still counted, but broken out here. - + cache_hits cache_misses cache_hit_ratio Hits and misses are counted per individual IO as bcache sees them; a partial hit is counted as a miss. 
- + cache_miss_collisions Counts instances where data was going to be inserted into the cache from a cache miss, but raced with a write and data was already present (usually 0 since the synchronization for cache misses was rewritten) - + cache_readaheads Count of times readahead occurred. ``` @@ -70,7 +70,7 @@ Using this configuration: When run with: ``` -./telegraf -config telegraf.conf -filter bcache -test +./telegraf -config telegraf.conf -input-filter bcache -test ``` It produces: diff --git a/plugins/bcache/bcache.go b/plugins/inputs/bcache/bcache.go similarity index 92% rename from plugins/bcache/bcache.go rename to plugins/inputs/bcache/bcache.go index 92cea3d63..146849eef 100644 --- a/plugins/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Bcache struct { @@ -69,7 +69,7 @@ func prettyToBytes(v string) uint64 { return uint64(result) } -func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { +func (b *Bcache) gatherBcache(bdev string, acc inputs.Accumulator) error { tags := getTags(bdev) metrics, err := filepath.Glob(bdev + "/stats_total/*") if len(metrics) < 0 { @@ -104,7 +104,7 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error { return nil } -func (b *Bcache) Gather(acc plugins.Accumulator) error { +func (b *Bcache) Gather(acc inputs.Accumulator) error { bcacheDevsChecked := make(map[string]bool) var restrictDevs bool if len(b.BcacheDevs) != 0 { @@ -135,7 +135,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("bcache", func() plugins.Plugin { + inputs.Add("bcache", func() inputs.Input { return &Bcache{} }) } diff --git a/plugins/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go similarity index 100% rename from plugins/bcache/bcache_test.go rename to plugins/inputs/bcache/bcache_test.go diff --git 
a/plugins/disque/disque.go b/plugins/inputs/disque/disque.go similarity index 94% rename from plugins/disque/disque.go rename to plugins/inputs/disque/disque.go index b7b7dd5c1..334fdd554 100644 --- a/plugins/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Disque struct { @@ -61,7 +61,7 @@ var ErrProtocolError = errors.New("disque protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Disque) Gather(acc plugins.Accumulator) error { +func (g *Disque) Gather(acc inputs.Accumulator) error { if len(g.Servers) == 0 { url := &url.URL{ Host: ":7711", @@ -98,7 +98,7 @@ func (g *Disque) Gather(acc plugins.Accumulator) error { const defaultPort = "7711" -func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { +func (g *Disque) gatherServer(addr *url.URL, acc inputs.Accumulator) error { if g.c == nil { _, _, err := net.SplitHostPort(addr.Host) @@ -198,7 +198,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error { } func init() { - plugins.Add("disque", func() plugins.Plugin { + inputs.Add("disque", func() inputs.Input { return &Disque{} }) } diff --git a/plugins/disque/disque_test.go b/plugins/inputs/disque/disque_test.go similarity index 100% rename from plugins/disque/disque_test.go rename to plugins/inputs/disque/disque_test.go diff --git a/plugins/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md similarity index 100% rename from plugins/elasticsearch/README.md rename to plugins/inputs/elasticsearch/README.md diff --git a/plugins/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go similarity index 95% rename from plugins/elasticsearch/elasticsearch.go rename to plugins/inputs/elasticsearch/elasticsearch.go index 2266f2243..f8185a053 
100644 --- a/plugins/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) const statsPath = "/_nodes/stats" @@ -92,7 +92,7 @@ func (e *Elasticsearch) Description() string { // Gather reads the stats from Elasticsearch and writes it to the // Accumulator. -func (e *Elasticsearch) Gather(acc plugins.Accumulator) error { +func (e *Elasticsearch) Gather(acc inputs.Accumulator) error { for _, serv := range e.Servers { var url string if e.Local { @@ -110,7 +110,7 @@ func (e *Elasticsearch) Gather(acc plugins.Accumulator) error { return nil } -func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) error { +func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error { nodeStats := &struct { ClusterName string `json:"cluster_name"` Nodes map[string]*node `json:"nodes"` @@ -155,7 +155,7 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err return nil } -func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator) error { +func (e *Elasticsearch) gatherClusterStats(url string, acc inputs.Accumulator) error { clusterStats := &clusterHealth{} if err := e.gatherData(url, clusterStats); err != nil { return err @@ -220,7 +220,7 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error { } func init() { - plugins.Add("elasticsearch", func() plugins.Plugin { + inputs.Add("elasticsearch", func() inputs.Input { return NewElasticsearch() }) } diff --git a/plugins/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go similarity index 100% rename from plugins/elasticsearch/elasticsearch_test.go rename to plugins/inputs/elasticsearch/elasticsearch_test.go diff --git a/plugins/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go 
similarity index 100% rename from plugins/elasticsearch/testdata_test.go rename to plugins/inputs/elasticsearch/testdata_test.go diff --git a/plugins/exec/README.md b/plugins/inputs/exec/README.md similarity index 100% rename from plugins/exec/README.md rename to plugins/inputs/exec/README.md diff --git a/plugins/exec/exec.go b/plugins/inputs/exec/exec.go similarity index 78% rename from plugins/exec/exec.go rename to plugins/inputs/exec/exec.go index 87a8cc72f..b3c1001f8 100644 --- a/plugins/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -9,20 +9,19 @@ import ( "github.com/gonuts/go-shellquote" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) const sampleConfig = ` # the command to run command = "/usr/bin/mycollector --foo=bar" - # name of the command (used as a prefix for measurements) - name = "mycollector" + # measurement name suffix (for separating different commands) + name_suffix = "_mycollector" ` type Exec struct { Command string - Name string runner Runner } @@ -62,7 +61,7 @@ func (e *Exec) Description() string { return "Read flattened metrics from one or more commands that output JSON to stdout" } -func (e *Exec) Gather(acc plugins.Accumulator) error { +func (e *Exec) Gather(acc inputs.Accumulator) error { out, err := e.runner.Run(e) if err != nil { return err @@ -81,18 +80,12 @@ func (e *Exec) Gather(acc plugins.Accumulator) error { return err } - var msrmnt_name string - if e.Name == "" { - msrmnt_name = "exec" - } else { - msrmnt_name = "exec_" + e.Name - } - acc.AddFields(msrmnt_name, f.Fields, nil) + acc.AddFields("exec", f.Fields, nil) return nil } func init() { - plugins.Add("exec", func() plugins.Plugin { + inputs.Add("exec", func() inputs.Input { return NewExec() }) } diff --git a/plugins/exec/exec_test.go b/plugins/inputs/exec/exec_test.go similarity index 93% rename from plugins/exec/exec_test.go rename to plugins/inputs/exec/exec_test.go index 
bb94a2fd5..d3e54429d 100644 --- a/plugins/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -54,7 +54,6 @@ func TestExec(t *testing.T) { e := &Exec{ runner: newRunnerMock([]byte(validJson), nil), Command: "testcommand arg1", - Name: "mycollector", } var acc testutil.Accumulator @@ -68,14 +67,13 @@ func TestExec(t *testing.T) { "cpu_free": float64(32), "percent": float64(0.81), } - acc.AssertContainsFields(t, "exec_mycollector", fields) + acc.AssertContainsFields(t, "exec", fields) } func TestExecMalformed(t *testing.T) { e := &Exec{ runner: newRunnerMock([]byte(malformedJson), nil), Command: "badcommand arg1", - Name: "mycollector", } var acc testutil.Accumulator @@ -88,7 +86,6 @@ func TestCommandError(t *testing.T) { e := &Exec{ runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), Command: "badcommand", - Name: "mycollector", } var acc testutil.Accumulator diff --git a/plugins/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go similarity index 97% rename from plugins/haproxy/haproxy.go rename to plugins/inputs/haproxy/haproxy.go index 10f7dab3c..23b92fc26 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -3,7 +3,7 @@ package haproxy import ( "encoding/csv" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "io" "net/http" "net/url" @@ -104,7 +104,7 @@ func (r *haproxy) Description() string { // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (g *haproxy) Gather(acc plugins.Accumulator) error { +func (g *haproxy) Gather(acc inputs.Accumulator) error { if len(g.Servers) == 0 { return g.gatherServer("http://127.0.0.1:1936", acc) } @@ -126,7 +126,7 @@ func (g *haproxy) Gather(acc plugins.Accumulator) error { return outerr } -func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { +func (g *haproxy) gatherServer(addr string, acc inputs.Accumulator) error { if g.client == nil { client := &http.Client{} @@ -156,7 +156,7 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error { return importCsvResult(res.Body, acc, u.Host) } -func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { +func importCsvResult(r io.Reader, acc inputs.Accumulator, host string) error { csv := csv.NewReader(r) result, err := csv.ReadAll() now := time.Now() @@ -358,7 +358,7 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error { } func init() { - plugins.Add("haproxy", func() plugins.Plugin { + inputs.Add("haproxy", func() inputs.Input { return &haproxy{} }) } diff --git a/plugins/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go similarity index 100% rename from plugins/haproxy/haproxy_test.go rename to plugins/inputs/haproxy/haproxy_test.go diff --git a/plugins/httpjson/README.md b/plugins/inputs/httpjson/README.md similarity index 100% rename from plugins/httpjson/README.md rename to plugins/inputs/httpjson/README.md diff --git a/plugins/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go similarity index 95% rename from plugins/httpjson/httpjson.go rename to plugins/inputs/httpjson/httpjson.go index 9da0f63d0..e31085e3a 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -11,7 +11,7 @@ import ( "sync" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type HttpJson struct { @@ -63,7 +63,7 @@ var 
sampleConfig = ` # ] # HTTP parameters (all values must be strings) - [plugins.httpjson.parameters] + [inputs.httpjson.parameters] event_type = "cpu_spike" threshold = "0.75" ` @@ -77,7 +77,7 @@ func (h *HttpJson) Description() string { } // Gathers data for all servers. -func (h *HttpJson) Gather(acc plugins.Accumulator) error { +func (h *HttpJson) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup errorChannel := make(chan error, len(h.Servers)) @@ -116,7 +116,7 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error { // Returns: // error: Any error that may have occurred func (h *HttpJson) gatherServer( - acc plugins.Accumulator, + acc inputs.Accumulator, serverURL string, ) error { resp, err := h.sendRequest(serverURL) @@ -210,7 +210,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) { } func init() { - plugins.Add("httpjson", func() plugins.Plugin { + inputs.Add("httpjson", func() inputs.Input { return &HttpJson{client: RealHTTPClient{client: &http.Client{}}} }) } diff --git a/plugins/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go similarity index 100% rename from plugins/httpjson/httpjson_test.go rename to plugins/inputs/httpjson/httpjson_test.go diff --git a/plugins/influxdb/README.md b/plugins/inputs/influxdb/README.md similarity index 98% rename from plugins/influxdb/README.md rename to plugins/inputs/influxdb/README.md index 8d4727973..84dc3caf8 100644 --- a/plugins/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -5,7 +5,7 @@ The influxdb plugin collects InfluxDB-formatted data from JSON endpoints. 
With a configuration of: ```toml -[[plugins.influxdb]] +[[inputs.influxdb]] urls = [ "http://127.0.0.1:8086/debug/vars", "http://192.168.2.1:8086/debug/vars" diff --git a/plugins/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go similarity index 94% rename from plugins/influxdb/influxdb.go rename to plugins/inputs/influxdb/influxdb.go index a4044364a..cf5742e1d 100644 --- a/plugins/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -8,7 +8,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type InfluxDB struct { @@ -32,7 +32,7 @@ func (*InfluxDB) SampleConfig() string { ` } -func (i *InfluxDB) Gather(acc plugins.Accumulator) error { +func (i *InfluxDB) Gather(acc inputs.Accumulator) error { errorChannel := make(chan error, len(i.URLs)) var wg sync.WaitGroup @@ -77,7 +77,7 @@ type point struct { // Returns: // error: Any error that may have occurred func (i *InfluxDB) gatherURL( - acc plugins.Accumulator, + acc inputs.Accumulator, url string, ) error { resp, err := http.Get(url) @@ -140,7 +140,7 @@ func (i *InfluxDB) gatherURL( } func init() { - plugins.Add("influxdb", func() plugins.Plugin { + inputs.Add("influxdb", func() inputs.Input { return &InfluxDB{} }) } diff --git a/plugins/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go similarity index 97% rename from plugins/influxdb/influxdb_test.go rename to plugins/inputs/influxdb/influxdb_test.go index 7911156de..0e02cc6bd 100644 --- a/plugins/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -5,7 +5,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/plugins/influxdb" + "github.com/influxdb/telegraf/plugins/inputs/influxdb" "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/jolokia/README.md b/plugins/inputs/jolokia/README.md similarity index 100% rename from plugins/jolokia/README.md rename to 
plugins/inputs/jolokia/README.md diff --git a/plugins/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go similarity index 94% rename from plugins/jolokia/jolokia.go rename to plugins/inputs/jolokia/jolokia.go index 610f08cd5..36811bd27 100644 --- a/plugins/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -8,7 +8,7 @@ import ( "net/http" "net/url" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Server struct { @@ -49,7 +49,7 @@ func (j *Jolokia) SampleConfig() string { context = "/jolokia/read" # List of servers exposing jolokia read service - [[plugins.jolokia.servers]] + [[inputs.jolokia.servers]] name = "stable" host = "192.168.103.2" port = "8180" @@ -59,7 +59,7 @@ func (j *Jolokia) SampleConfig() string { # List of metrics collected on above servers # Each metric consists in a name, a jmx path and either a pass or drop slice attributes # This collect all heap memory usage metrics - [[plugins.jolokia.metrics]] + [[inputs.jolokia.metrics]] name = "heap_memory_usage" jmx = "/java.lang:type=Memory/HeapMemoryUsage" ` @@ -108,7 +108,7 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { return jsonOut, nil } -func (j *Jolokia) Gather(acc plugins.Accumulator) error { +func (j *Jolokia) Gather(acc inputs.Accumulator) error { context := j.Context //"/jolokia/read" servers := j.Servers metrics := j.Metrics @@ -157,7 +157,7 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("jolokia", func() plugins.Plugin { + inputs.Add("jolokia", func() inputs.Input { return &Jolokia{jClient: &JolokiaClientImpl{client: &http.Client{}}} }) } diff --git a/plugins/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go similarity index 100% rename from plugins/jolokia/jolokia_test.go rename to plugins/inputs/jolokia/jolokia_test.go diff --git a/plugins/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md similarity index 100% rename 
from plugins/kafka_consumer/README.md rename to plugins/inputs/kafka_consumer/README.md diff --git a/plugins/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go similarity index 96% rename from plugins/kafka_consumer/kafka_consumer.go rename to plugins/inputs/kafka_consumer/kafka_consumer.go index f47e7e92c..f3558e2e5 100644 --- a/plugins/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/influxdb/influxdb/models" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "github.com/Shopify/sarama" "github.com/wvanbergen/kafka/consumergroup" @@ -148,7 +148,7 @@ func (k *Kafka) Stop() { } } -func (k *Kafka) Gather(acc plugins.Accumulator) error { +func (k *Kafka) Gather(acc inputs.Accumulator) error { k.Lock() defer k.Unlock() npoints := len(k.pointChan) @@ -160,7 +160,7 @@ func (k *Kafka) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("kafka_consumer", func() plugins.Plugin { + inputs.Add("kafka_consumer", func() inputs.Input { return &Kafka{} }) } diff --git a/plugins/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go similarity index 100% rename from plugins/kafka_consumer/kafka_consumer_integration_test.go rename to plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go diff --git a/plugins/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go similarity index 100% rename from plugins/kafka_consumer/kafka_consumer_test.go rename to plugins/inputs/kafka_consumer/kafka_consumer_test.go diff --git a/plugins/leofs/leofs.go b/plugins/inputs/leofs/leofs.go similarity index 96% rename from plugins/leofs/leofs.go rename to plugins/inputs/leofs/leofs.go index 5a1df7c56..c65db5f37 100644 --- a/plugins/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -3,7 +3,7 @@ package leofs import ( "bufio" 
"fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "net/url" "os/exec" "strconv" @@ -146,7 +146,7 @@ func (l *LeoFS) Description() string { return "Read metrics from a LeoFS Server via SNMP" } -func (l *LeoFS) Gather(acc plugins.Accumulator) error { +func (l *LeoFS) Gather(acc inputs.Accumulator) error { if len(l.Servers) == 0 { l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) return nil @@ -176,7 +176,7 @@ func (l *LeoFS) Gather(acc plugins.Accumulator) error { return outerr } -func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins.Accumulator) error { +func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc inputs.Accumulator) error { cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid) stdout, err := cmd.StdoutPipe() if err != nil { @@ -225,7 +225,7 @@ func retrieveTokenAfterColon(line string) (string, error) { } func init() { - plugins.Add("leofs", func() plugins.Plugin { + inputs.Add("leofs", func() inputs.Input { return &LeoFS{} }) } diff --git a/plugins/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go similarity index 100% rename from plugins/leofs/leofs_test.go rename to plugins/inputs/leofs/leofs_test.go diff --git a/plugins/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go similarity index 96% rename from plugins/lustre2/lustre2.go rename to plugins/inputs/lustre2/lustre2.go index 65f936966..90222af79 100644 --- a/plugins/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -14,7 +14,7 @@ import ( "strings" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // Lustre proc files can change between versions, so we want to future-proof @@ -129,7 +129,7 @@ var wanted_mds_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc plugins.Accumulator) error { +func (l *Lustre2) 
GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc inputs.Accumulator) error { files, err := filepath.Glob(fileglob) if err != nil { return err @@ -193,7 +193,7 @@ func (l *Lustre2) Description() string { } // Gather reads stats from all lustre targets -func (l *Lustre2) Gather(acc plugins.Accumulator) error { +func (l *Lustre2) Gather(acc inputs.Accumulator) error { l.allFields = make(map[string]map[string]interface{}) if len(l.Ost_procfiles) == 0 { @@ -244,7 +244,7 @@ func (l *Lustre2) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("lustre2", func() plugins.Plugin { + inputs.Add("lustre2", func() inputs.Input { return &Lustre2{} }) } diff --git a/plugins/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go similarity index 100% rename from plugins/lustre2/lustre2_test.go rename to plugins/inputs/lustre2/lustre2_test.go diff --git a/plugins/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go similarity index 100% rename from plugins/mailchimp/chimp_api.go rename to plugins/inputs/mailchimp/chimp_api.go diff --git a/plugins/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go similarity index 93% rename from plugins/mailchimp/mailchimp.go rename to plugins/inputs/mailchimp/mailchimp.go index d26d479af..4b148a95c 100644 --- a/plugins/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type MailChimp struct { @@ -34,7 +34,7 @@ func (m *MailChimp) Description() string { return "Gathers metrics from the /3.0/reports MailChimp API" } -func (m *MailChimp) Gather(acc plugins.Accumulator) error { +func (m *MailChimp) Gather(acc inputs.Accumulator) error { if m.api == nil { m.api = NewChimpAPI(m.ApiKey) } @@ -71,7 +71,7 @@ func (m *MailChimp) Gather(acc plugins.Accumulator) error { return nil } -func gatherReport(acc plugins.Accumulator, report Report, now 
time.Time) { +func gatherReport(acc inputs.Accumulator, report Report, now time.Time) { tags := make(map[string]string) tags["id"] = report.ID tags["campaign_title"] = report.CampaignTitle @@ -110,7 +110,7 @@ func gatherReport(acc plugins.Accumulator, report Report, now time.Time) { } func init() { - plugins.Add("mailchimp", func() plugins.Plugin { + inputs.Add("mailchimp", func() inputs.Input { return &MailChimp{} }) } diff --git a/plugins/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go similarity index 100% rename from plugins/mailchimp/mailchimp_test.go rename to plugins/inputs/mailchimp/mailchimp_test.go diff --git a/plugins/memcached/memcached.go b/plugins/inputs/memcached/memcached.go similarity index 95% rename from plugins/memcached/memcached.go rename to plugins/inputs/memcached/memcached.go index 196478264..1d9ee9547 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // Memcached is a memcached plugin @@ -69,7 +69,7 @@ func (m *Memcached) Description() string { } // Gather reads stats from all configured servers accumulates stats -func (m *Memcached) Gather(acc plugins.Accumulator) error { +func (m *Memcached) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 && len(m.UnixSockets) == 0 { return m.gatherServer(":11211", false, acc) } @@ -92,7 +92,7 @@ func (m *Memcached) Gather(acc plugins.Accumulator) error { func (m *Memcached) gatherServer( address string, unix bool, - acc plugins.Accumulator, + acc inputs.Accumulator, ) error { var conn net.Conn if unix { @@ -178,7 +178,7 @@ func parseResponse(r *bufio.Reader) (map[string]string, error) { } func init() { - plugins.Add("memcached", func() plugins.Plugin { + inputs.Add("memcached", func() inputs.Input { return &Memcached{} }) } diff --git a/plugins/memcached/memcached_test.go 
b/plugins/inputs/memcached/memcached_test.go similarity index 100% rename from plugins/memcached/memcached_test.go rename to plugins/inputs/memcached/memcached_test.go diff --git a/plugins/mock_Plugin.go b/plugins/inputs/mock_Plugin.go similarity index 92% rename from plugins/mock_Plugin.go rename to plugins/inputs/mock_Plugin.go index 492384b25..87dd14884 100644 --- a/plugins/mock_Plugin.go +++ b/plugins/inputs/mock_Plugin.go @@ -1,4 +1,4 @@ -package plugins +package inputs import "github.com/stretchr/testify/mock" diff --git a/plugins/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go similarity index 93% rename from plugins/mongodb/mongodb.go rename to plugins/inputs/mongodb/mongodb.go index 40c77931a..4cb3ffee5 100644 --- a/plugins/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "gopkg.in/mgo.v2" ) @@ -45,7 +45,7 @@ var localhost = &url.URL{Host: "127.0.0.1:27017"} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (m *MongoDB) Gather(acc plugins.Accumulator) error { +func (m *MongoDB) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 { m.gatherServer(m.getMongoServer(localhost), acc) return nil @@ -88,7 +88,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server { return m.mongos[url.Host] } -func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { +func (m *MongoDB) gatherServer(server *Server, acc inputs.Accumulator) error { if server.Session == nil { var dialAddrs []string if server.Url.User != nil { @@ -138,7 +138,7 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error { } func init() { - plugins.Add("mongodb", func() plugins.Plugin { + inputs.Add("mongodb", func() inputs.Input { return &MongoDB{ mongos: make(map[string]*Server), } diff --git a/plugins/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go similarity index 96% rename from plugins/mongodb/mongodb_data.go rename to plugins/inputs/mongodb/mongodb_data.go index 1ebb76ced..15f8c479b 100644 --- a/plugins/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -5,7 +5,7 @@ import ( "reflect" "strconv" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type MongodbData struct { @@ -97,7 +97,7 @@ func (d *MongodbData) add(key string, val interface{}) { d.Fields[key] = val } -func (d *MongodbData) flush(acc plugins.Accumulator) { +func (d *MongodbData) flush(acc inputs.Accumulator) { acc.AddFields( "mongodb", d.Fields, diff --git a/plugins/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go similarity index 100% rename from plugins/mongodb/mongodb_data_test.go rename to plugins/inputs/mongodb/mongodb_data_test.go diff --git a/plugins/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go similarity index 90% rename from plugins/mongodb/mongodb_server.go rename to plugins/inputs/mongodb/mongodb_server.go index 134be5bae..795cf97d7 100644 --- 
a/plugins/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,7 +4,7 @@ import ( "net/url" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) @@ -21,7 +21,7 @@ func (s *Server) getDefaultTags() map[string]string { return tags } -func (s *Server) gatherData(acc plugins.Accumulator) error { +func (s *Server) gatherData(acc inputs.Accumulator) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) result := &ServerStatus{} diff --git a/plugins/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go similarity index 100% rename from plugins/mongodb/mongodb_server_test.go rename to plugins/inputs/mongodb/mongodb_server_test.go diff --git a/plugins/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go similarity index 100% rename from plugins/mongodb/mongodb_test.go rename to plugins/inputs/mongodb/mongodb_test.go diff --git a/plugins/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go similarity index 100% rename from plugins/mongodb/mongostat.go rename to plugins/inputs/mongodb/mongostat.go diff --git a/plugins/mysql/mysql.go b/plugins/inputs/mysql/mysql.go similarity index 94% rename from plugins/mysql/mysql.go rename to plugins/inputs/mysql/mysql.go index db99123ff..f9126b5ea 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -6,7 +6,7 @@ import ( "strings" _ "github.com/go-sql-driver/mysql" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Mysql struct { @@ -35,7 +35,7 @@ func (m *Mysql) Description() string { var localhost = "" -func (m *Mysql) Gather(acc plugins.Accumulator) error { +func (m *Mysql) Gather(acc inputs.Accumulator) error { if len(m.Servers) == 0 { // if we can't get stats in this case, thats fine, don't report // an error. 
@@ -113,7 +113,7 @@ var mappings = []*mapping{ }, } -func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { +func (m *Mysql) gatherServer(serv string, acc inputs.Accumulator) error { // If user forgot the '/', add it if strings.HasSuffix(serv, ")") { serv = serv + "/" @@ -207,7 +207,7 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { } func init() { - plugins.Add("mysql", func() plugins.Plugin { + inputs.Add("mysql", func() inputs.Input { return &Mysql{} }) } diff --git a/plugins/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go similarity index 100% rename from plugins/mysql/mysql_test.go rename to plugins/inputs/mysql/mysql_test.go diff --git a/plugins/mysql/parse_dsn.go b/plugins/inputs/mysql/parse_dsn.go similarity index 100% rename from plugins/mysql/parse_dsn.go rename to plugins/inputs/mysql/parse_dsn.go diff --git a/plugins/nginx/nginx.go b/plugins/inputs/nginx/nginx.go similarity index 93% rename from plugins/nginx/nginx.go rename to plugins/inputs/nginx/nginx.go index 5d7aace2a..18e3244f7 100644 --- a/plugins/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Nginx struct { @@ -31,7 +31,7 @@ func (n *Nginx) Description() string { return "Read Nginx's basic status information (ngx_http_stub_status_module)" } -func (n *Nginx) Gather(acc plugins.Accumulator) error { +func (n *Nginx) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -59,7 +59,7 @@ var tr = &http.Transport{ var client = &http.Client{Transport: tr} -func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { +func (n *Nginx) gatherUrl(addr *url.URL, acc inputs.Accumulator) error { resp, err := client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -159,7 +159,7 @@ func getTags(addr *url.URL) 
map[string]string { } func init() { - plugins.Add("nginx", func() plugins.Plugin { + inputs.Add("nginx", func() inputs.Input { return &Nginx{} }) } diff --git a/plugins/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go similarity index 100% rename from plugins/nginx/nginx_test.go rename to plugins/inputs/nginx/nginx_test.go diff --git a/plugins/phpfpm/README.md b/plugins/inputs/phpfpm/README.md similarity index 98% rename from plugins/phpfpm/README.md rename to plugins/inputs/phpfpm/README.md index d2e52534c..c2a42523a 100644 --- a/plugins/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -43,7 +43,7 @@ Using this configuration: When run with: ``` -./telegraf -config telegraf.conf -filter phpfpm -test +./telegraf -config telegraf.conf -input-filter phpfpm -test ``` It produces: diff --git a/plugins/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go similarity index 94% rename from plugins/phpfpm/phpfpm.go rename to plugins/inputs/phpfpm/phpfpm.go index 2f2164913..ceffc673e 100644 --- a/plugins/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -11,7 +11,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) const ( @@ -67,7 +67,7 @@ func (r *phpfpm) Description() string { // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (g *phpfpm) Gather(acc plugins.Accumulator) error { +func (g *phpfpm) Gather(acc inputs.Accumulator) error { if len(g.Urls) == 0 { return g.gatherServer("http://127.0.0.1/status", acc) } @@ -90,7 +90,7 @@ func (g *phpfpm) Gather(acc plugins.Accumulator) error { } // Request status page to get stat raw data -func (g *phpfpm) gatherServer(addr string, acc plugins.Accumulator) error { +func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error { if g.client == nil { client := &http.Client{} @@ -153,7 +153,7 @@ func (g *phpfpm) gatherServer(addr string, acc plugins.Accumulator) error { } // Import HTTP stat data into Telegraf system -func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat, error) { +func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) { stats := make(poolStat) var currentPool string @@ -209,7 +209,7 @@ func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat, } func init() { - plugins.Add("phpfpm", func() plugins.Plugin { + inputs.Add("phpfpm", func() inputs.Input { return &phpfpm{} }) } diff --git a/plugins/phpfpm/phpfpm_fcgi.go b/plugins/inputs/phpfpm/phpfpm_fcgi.go similarity index 100% rename from plugins/phpfpm/phpfpm_fcgi.go rename to plugins/inputs/phpfpm/phpfpm_fcgi.go diff --git a/plugins/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go similarity index 100% rename from plugins/phpfpm/phpfpm_test.go rename to plugins/inputs/phpfpm/phpfpm_test.go diff --git a/plugins/ping/ping.go b/plugins/inputs/ping/ping.go similarity index 96% rename from plugins/ping/ping.go rename to plugins/inputs/ping/ping.go index 72eee91ad..e03fc71e8 100644 --- a/plugins/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // HostPinger is a function that runs the "ping" function using a list of @@ -56,7 +56,7 @@ func (_ *Ping) 
SampleConfig() string { return sampleConfig } -func (p *Ping) Gather(acc plugins.Accumulator) error { +func (p *Ping) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup errorChannel := make(chan error, len(p.Urls)*2) @@ -64,7 +64,7 @@ func (p *Ping) Gather(acc plugins.Accumulator) error { // Spin off a go routine for each url to ping for _, url := range p.Urls { wg.Add(1) - go func(url string, acc plugins.Accumulator) { + go func(url string, acc inputs.Accumulator) { defer wg.Done() args := p.args(url) out, err := p.pingHost(args...) @@ -174,7 +174,7 @@ func processPingOutput(out string) (int, int, float64, error) { } func init() { - plugins.Add("ping", func() plugins.Plugin { + inputs.Add("ping", func() inputs.Input { return &Ping{pingHost: hostPinger} }) } diff --git a/plugins/ping/ping_test.go b/plugins/inputs/ping/ping_test.go similarity index 100% rename from plugins/ping/ping_test.go rename to plugins/inputs/ping/ping_test.go diff --git a/plugins/postgresql/README.md b/plugins/inputs/postgresql/README.md similarity index 100% rename from plugins/postgresql/README.md rename to plugins/inputs/postgresql/README.md diff --git a/plugins/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go similarity index 93% rename from plugins/postgresql/postgresql.go rename to plugins/inputs/postgresql/postgresql.go index eaefadb50..c356cea77 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -6,7 +6,7 @@ import ( "fmt" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" _ "github.com/lib/pq" ) @@ -53,7 +53,7 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { var localhost = "host=localhost sslmode=disable" -func (p *Postgresql) Gather(acc plugins.Accumulator) error { +func (p *Postgresql) Gather(acc inputs.Accumulator) error { var query string if p.Address == "" || p.Address == "localhost" { @@ -101,7 +101,7 @@ type scanner interface { Scan(dest 
...interface{}) error } -func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error { +func (p *Postgresql) accRow(row scanner, acc inputs.Accumulator) error { var columnVars []interface{} var dbname bytes.Buffer @@ -145,7 +145,7 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error { } func init() { - plugins.Add("postgresql", func() plugins.Plugin { + inputs.Add("postgresql", func() inputs.Input { return &Postgresql{} }) } diff --git a/plugins/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go similarity index 100% rename from plugins/postgresql/postgresql_test.go rename to plugins/inputs/postgresql/postgresql_test.go diff --git a/plugins/procstat/README.md b/plugins/inputs/procstat/README.md similarity index 100% rename from plugins/procstat/README.md rename to plugins/inputs/procstat/README.md diff --git a/plugins/procstat/procstat.go b/plugins/inputs/procstat/procstat.go similarity index 95% rename from plugins/procstat/procstat.go rename to plugins/inputs/procstat/procstat.go index 2f171db3e..5e596d6d8 100644 --- a/plugins/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -10,7 +10,7 @@ import ( "github.com/shirou/gopsutil/process" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Procstat struct { @@ -45,7 +45,7 @@ func (_ *Procstat) Description() string { return "Monitor process cpu and memory usage" } -func (p *Procstat) Gather(acc plugins.Accumulator) error { +func (p *Procstat) Gather(acc inputs.Accumulator) error { procs, err := p.createProcesses() if err != nil { log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s", @@ -161,7 +161,7 @@ func pidsFromPattern(pattern string) ([]int32, error) { } func init() { - plugins.Add("procstat", func() plugins.Plugin { + inputs.Add("procstat", func() inputs.Input { return NewProcstat() }) } diff --git a/plugins/procstat/procstat_test.go 
b/plugins/inputs/procstat/procstat_test.go similarity index 100% rename from plugins/procstat/procstat_test.go rename to plugins/inputs/procstat/procstat_test.go diff --git a/plugins/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go similarity index 96% rename from plugins/procstat/spec_processor.go rename to plugins/inputs/procstat/spec_processor.go index a61152c9b..9c7e53826 100644 --- a/plugins/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -6,14 +6,14 @@ import ( "github.com/shirou/gopsutil/process" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type SpecProcessor struct { Prefix string tags map[string]string fields map[string]interface{} - acc plugins.Accumulator + acc inputs.Accumulator proc *process.Process } @@ -34,7 +34,7 @@ func (p *SpecProcessor) flush() { func NewSpecProcessor( prefix string, - acc plugins.Accumulator, + acc inputs.Accumulator, p *process.Process, ) *SpecProcessor { tags := make(map[string]string) diff --git a/plugins/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go similarity index 89% rename from plugins/prometheus/prometheus.go rename to plugins/inputs/prometheus/prometheus.go index 2742393c7..758788b8d 100644 --- a/plugins/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -3,7 +3,7 @@ package prometheus import ( "errors" "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "io" @@ -32,7 +32,7 @@ var ErrProtocolError = errors.New("prometheus protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (g *Prometheus) Gather(acc plugins.Accumulator) error { +func (g *Prometheus) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -50,7 +50,7 @@ func (g *Prometheus) Gather(acc plugins.Accumulator) error { return outerr } -func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { +func (g *Prometheus) gatherURL(url string, acc inputs.Accumulator) error { resp, err := http.Get(url) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", url, err) @@ -97,7 +97,7 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error { } func init() { - plugins.Add("prometheus", func() plugins.Plugin { + inputs.Add("prometheus", func() inputs.Input { return &Prometheus{} }) } diff --git a/plugins/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go similarity index 100% rename from plugins/prometheus/prometheus_test.go rename to plugins/inputs/prometheus/prometheus_test.go diff --git a/plugins/puppetagent/README.md b/plugins/inputs/puppetagent/README.md similarity index 100% rename from plugins/puppetagent/README.md rename to plugins/inputs/puppetagent/README.md diff --git a/plugins/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml similarity index 100% rename from plugins/puppetagent/last_run_summary.yaml rename to plugins/inputs/puppetagent/last_run_summary.yaml diff --git a/plugins/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go similarity index 92% rename from plugins/puppetagent/puppetagent.go rename to plugins/inputs/puppetagent/puppetagent.go index 8939e5963..d0bedae9e 100644 --- a/plugins/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -8,7 +8,7 @@ import ( "reflect" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // PuppetAgent is a PuppetAgent plugin @@ -82,7 +82,7 @@ func (pa *PuppetAgent) Description() string { } // 
Gather reads stats from all configured servers accumulates stats -func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error { +func (pa *PuppetAgent) Gather(acc inputs.Accumulator) error { if len(pa.Location) == 0 { pa.Location = "/var/lib/puppet/state/last_run_summary.yaml" @@ -110,7 +110,7 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error { return nil } -func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) { +func structPrinter(s *State, acc inputs.Accumulator, tags map[string]string) { e := reflect.ValueOf(s).Elem() fields := make(map[string]interface{}) @@ -131,7 +131,7 @@ func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) { } func init() { - plugins.Add("puppetagent", func() plugins.Plugin { + inputs.Add("puppetagent", func() inputs.Input { return &PuppetAgent{} }) } diff --git a/plugins/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go similarity index 100% rename from plugins/puppetagent/puppetagent_test.go rename to plugins/inputs/puppetagent/puppetagent_test.go diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go similarity index 94% rename from plugins/rabbitmq/rabbitmq.go rename to plugins/inputs/rabbitmq/rabbitmq.go index 227811bc8..fc95af494 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) const DefaultUsername = "guest" @@ -91,7 +91,7 @@ type Node struct { SocketsUsed int64 `json:"sockets_used"` } -type gatherFunc func(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) +type gatherFunc func(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues} @@ -114,7 +114,7 @@ func (r *RabbitMQ) Description() string { return "Read metrics from one or many RabbitMQ servers via the 
management API" } -func (r *RabbitMQ) Gather(acc plugins.Accumulator) error { +func (r *RabbitMQ) Gather(acc inputs.Accumulator) error { if r.Client == nil { r.Client = &http.Client{} } @@ -167,7 +167,7 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error { return nil } -func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { +func gatherOverview(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { overview := &OverviewResponse{} err := r.requestJSON("/api/overview", &overview) @@ -203,7 +203,7 @@ func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { errChan <- nil } -func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { +func gatherNodes(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { nodes := make([]Node, 0) // Gather information about nodes err := r.requestJSON("/api/nodes", &nodes) @@ -240,7 +240,7 @@ func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { errChan <- nil } -func gatherQueues(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) { +func gatherQueues(r *RabbitMQ, acc inputs.Accumulator, errChan chan error) { // Gather information about queues queues := make([]Queue, 0) err := r.requestJSON("/api/queues", &queues) @@ -320,7 +320,7 @@ func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool { } func init() { - plugins.Add("rabbitmq", func() plugins.Plugin { + inputs.Add("rabbitmq", func() inputs.Input { return &RabbitMQ{} }) } diff --git a/plugins/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go similarity index 100% rename from plugins/rabbitmq/rabbitmq_test.go rename to plugins/inputs/rabbitmq/rabbitmq_test.go diff --git a/plugins/redis/redis.go b/plugins/inputs/redis/redis.go similarity index 95% rename from plugins/redis/redis.go rename to plugins/inputs/redis/redis.go index 2e338ff19..c9e98e886 100644 --- a/plugins/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -10,7 +10,7 @@ import ( 
"strings" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Redis struct { @@ -76,7 +76,7 @@ var ErrProtocolError = errors.New("redis protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (r *Redis) Gather(acc plugins.Accumulator) error { +func (r *Redis) Gather(acc inputs.Accumulator) error { if len(r.Servers) == 0 { url := &url.URL{ Host: ":6379", @@ -113,7 +113,7 @@ func (r *Redis) Gather(acc plugins.Accumulator) error { const defaultPort = "6379" -func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { +func (r *Redis) gatherServer(addr *url.URL, acc inputs.Accumulator) error { _, _, err := net.SplitHostPort(addr.Host) if err != nil { addr.Host = addr.Host + ":" + defaultPort @@ -158,7 +158,7 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { // gatherInfoOutput gathers func gatherInfoOutput( rdr *bufio.Reader, - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, ) error { var keyspace_hits, keyspace_misses uint64 = 0, 0 @@ -227,7 +227,7 @@ func gatherInfoOutput( func gatherKeyspaceLine( name string, line string, - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, ) { if strings.Contains(line, "keys=") { @@ -246,7 +246,7 @@ func gatherKeyspaceLine( } func init() { - plugins.Add("redis", func() plugins.Plugin { + inputs.Add("redis", func() inputs.Input { return &Redis{} }) } diff --git a/plugins/redis/redis_test.go b/plugins/inputs/redis/redis_test.go similarity index 100% rename from plugins/redis/redis_test.go rename to plugins/inputs/redis/redis_test.go diff --git a/plugins/registry.go b/plugins/inputs/registry.go similarity index 64% rename from plugins/registry.go rename to plugins/inputs/registry.go index 3e544917d..2b99078f0 100644 --- a/plugins/registry.go +++ b/plugins/inputs/registry.go @@ -1,4 +1,4 @@ 
-package plugins +package inputs import "time" @@ -17,40 +17,40 @@ type Accumulator interface { t ...time.Time) } -type Plugin interface { - // SampleConfig returns the default configuration of the Plugin +type Input interface { + // SampleConfig returns the default configuration of the Input SampleConfig() string - // Description returns a one-sentence description on the Plugin + // Description returns a one-sentence description on the Input Description() string - // Gather takes in an accumulator and adds the metrics that the Plugin + // Gather takes in an accumulator and adds the metrics that the Input // gathers. This is called every "interval" Gather(Accumulator) error } -type ServicePlugin interface { - // SampleConfig returns the default configuration of the Plugin +type ServiceInput interface { + // SampleConfig returns the default configuration of the Input SampleConfig() string - // Description returns a one-sentence description on the Plugin + // Description returns a one-sentence description on the Input Description() string - // Gather takes in an accumulator and adds the metrics that the Plugin + // Gather takes in an accumulator and adds the metrics that the Input // gathers. 
This is called every "interval" Gather(Accumulator) error - // Start starts the ServicePlugin's service, whatever that may be + // Start starts the ServiceInput's service, whatever that may be Start() error // Stop stops the services and closes any necessary channels and connections Stop() } -type Creator func() Plugin +type Creator func() Input -var Plugins = map[string]Creator{} +var Inputs = map[string]Creator{} func Add(name string, creator Creator) { - Plugins[name] = creator + Inputs[name] = creator } diff --git a/plugins/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go similarity index 89% rename from plugins/rethinkdb/rethinkdb.go rename to plugins/inputs/rethinkdb/rethinkdb.go index 8af890661..17873f1ce 100644 --- a/plugins/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -5,7 +5,7 @@ import ( "net/url" "sync" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) @@ -35,7 +35,7 @@ var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
-func (r *RethinkDB) Gather(acc plugins.Accumulator) error { +func (r *RethinkDB) Gather(acc inputs.Accumulator) error { if len(r.Servers) == 0 { r.gatherServer(localhost, acc) return nil @@ -65,7 +65,7 @@ func (r *RethinkDB) Gather(acc plugins.Accumulator) error { return outerr } -func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error { +func (r *RethinkDB) gatherServer(server *Server, acc inputs.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ Address: server.Url.Host, @@ -87,7 +87,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc plugins.Accumulator) error } func init() { - plugins.Add("rethinkdb", func() plugins.Plugin { + inputs.Add("rethinkdb", func() inputs.Input { return &RethinkDB{} }) } diff --git a/plugins/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go similarity index 95% rename from plugins/rethinkdb/rethinkdb_data.go rename to plugins/inputs/rethinkdb/rethinkdb_data.go index 59abd83ba..3ea429d82 100644 --- a/plugins/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -4,7 +4,7 @@ import ( "reflect" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type serverStatus struct { @@ -88,7 +88,7 @@ var engineStats = map[string]string{ func (e *Engine) AddEngineStats( keys []string, - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, ) { engine := reflect.ValueOf(e).Elem() @@ -99,7 +99,7 @@ func (e *Engine) AddEngineStats( acc.AddFields("rethinkdb_engine", fields, tags) } -func (s *Storage) AddStats(acc plugins.Accumulator, tags map[string]string) { +func (s *Storage) AddStats(acc inputs.Accumulator, tags map[string]string) { fields := map[string]interface{}{ "cache_bytes_in_use": s.Cache.BytesInUse, "disk_read_bytes_per_sec": s.Disk.ReadBytesPerSec, diff --git a/plugins/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go similarity index 100% 
rename from plugins/rethinkdb/rethinkdb_data_test.go rename to plugins/inputs/rethinkdb/rethinkdb_data_test.go diff --git a/plugins/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go similarity index 94% rename from plugins/rethinkdb/rethinkdb_server.go rename to plugins/inputs/rethinkdb/rethinkdb_server.go index 9285068bd..4af916a4d 100644 --- a/plugins/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) @@ -20,7 +20,7 @@ type Server struct { serverStatus serverStatus } -func (s *Server) gatherData(acc plugins.Accumulator) error { +func (s *Server) gatherData(acc inputs.Accumulator) error { if err := s.getServerStatus(); err != nil { return fmt.Errorf("Failed to get server_status, %s\n", err) } @@ -110,7 +110,7 @@ var ClusterTracking = []string{ "written_docs_per_sec", } -func (s *Server) addClusterStats(acc plugins.Accumulator) error { +func (s *Server) addClusterStats(acc inputs.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session) if err != nil { return fmt.Errorf("cluster stats query error, %s\n", err.Error()) @@ -138,7 +138,7 @@ var MemberTracking = []string{ "total_writes", } -func (s *Server) addMemberStats(acc plugins.Accumulator) error { +func (s *Server) addMemberStats(acc inputs.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) if err != nil { return fmt.Errorf("member stats query error, %s\n", err.Error()) @@ -162,7 +162,7 @@ var TableTracking = []string{ "total_writes", } -func (s *Server) addTableStats(acc plugins.Accumulator) error { +func (s *Server) addTableStats(acc inputs.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) 
defer tablesCursor.Close() var tables []tableStatus diff --git a/plugins/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go similarity index 100% rename from plugins/rethinkdb/rethinkdb_server_test.go rename to plugins/inputs/rethinkdb/rethinkdb_server_test.go diff --git a/plugins/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go similarity index 100% rename from plugins/rethinkdb/rethinkdb_test.go rename to plugins/inputs/rethinkdb/rethinkdb_test.go diff --git a/plugins/statsd/README.md b/plugins/inputs/statsd/README.md similarity index 100% rename from plugins/statsd/README.md rename to plugins/inputs/statsd/README.md diff --git a/plugins/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go similarity index 100% rename from plugins/statsd/running_stats.go rename to plugins/inputs/statsd/running_stats.go diff --git a/plugins/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go similarity index 100% rename from plugins/statsd/running_stats_test.go rename to plugins/inputs/statsd/running_stats_test.go diff --git a/plugins/statsd/statsd.go b/plugins/inputs/statsd/statsd.go similarity index 98% rename from plugins/statsd/statsd.go rename to plugins/inputs/statsd/statsd.go index bd86a56d7..d210b55fa 100644 --- a/plugins/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -12,7 +12,7 @@ import ( "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) var dropwarn = "ERROR: Message queue full. 
Discarding line [%s] " + @@ -139,7 +139,7 @@ func (_ *Statsd) SampleConfig() string { return sampleConfig } -func (s *Statsd) Gather(acc plugins.Accumulator) error { +func (s *Statsd) Gather(acc inputs.Accumulator) error { s.Lock() defer s.Unlock() @@ -490,7 +490,7 @@ func (s *Statsd) Stop() { } func init() { - plugins.Add("statsd", func() plugins.Plugin { + inputs.Add("statsd", func() inputs.Input { return &Statsd{} }) } diff --git a/plugins/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go similarity index 100% rename from plugins/statsd/statsd_test.go rename to plugins/inputs/statsd/statsd_test.go diff --git a/plugins/system/CPU_README.md b/plugins/inputs/system/CPU_README.md similarity index 100% rename from plugins/system/CPU_README.md rename to plugins/inputs/system/CPU_README.md diff --git a/plugins/system/MEM_README.md b/plugins/inputs/system/MEM_README.md similarity index 100% rename from plugins/system/MEM_README.md rename to plugins/inputs/system/MEM_README.md diff --git a/plugins/system/NETSTAT_README.md b/plugins/inputs/system/NETSTAT_README.md similarity index 100% rename from plugins/system/NETSTAT_README.md rename to plugins/inputs/system/NETSTAT_README.md diff --git a/plugins/system/cpu.go b/plugins/inputs/system/cpu.go similarity index 95% rename from plugins/system/cpu.go rename to plugins/inputs/system/cpu.go index 24350fc6c..298df20bb 100644 --- a/plugins/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" "github.com/shirou/gopsutil/cpu" ) @@ -39,7 +39,7 @@ func (_ *CPUStats) SampleConfig() string { return sampleConfig } -func (s *CPUStats) Gather(acc plugins.Accumulator) error { +func (s *CPUStats) Gather(acc inputs.Accumulator) error { times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU) if err != nil { return fmt.Errorf("error getting CPU info: %s", err) @@ -111,7 +111,7 @@ func totalCpuTime(t cpu.CPUTimesStat) 
float64 { } func init() { - plugins.Add("cpu", func() plugins.Plugin { + inputs.Add("cpu", func() inputs.Input { return &CPUStats{ps: &systemPS{}} }) } diff --git a/plugins/system/cpu_test.go b/plugins/inputs/system/cpu_test.go similarity index 100% rename from plugins/system/cpu_test.go rename to plugins/inputs/system/cpu_test.go diff --git a/plugins/system/disk.go b/plugins/inputs/system/disk.go similarity index 91% rename from plugins/system/disk.go rename to plugins/inputs/system/disk.go index 410044d2c..5d1553dd4 100644 --- a/plugins/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -3,7 +3,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type DiskStats struct { @@ -26,7 +26,7 @@ func (_ *DiskStats) SampleConfig() string { return diskSampleConfig } -func (s *DiskStats) Gather(acc plugins.Accumulator) error { +func (s *DiskStats) Gather(acc inputs.Accumulator) error { disks, err := s.ps.DiskUsage() if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) @@ -88,7 +88,7 @@ func (_ *DiskIOStats) SampleConfig() string { return diskIoSampleConfig } -func (s *DiskIOStats) Gather(acc plugins.Accumulator) error { +func (s *DiskIOStats) Gather(acc inputs.Accumulator) error { diskio, err := s.ps.DiskIO() if err != nil { return fmt.Errorf("error getting disk io info: %s", err) @@ -134,11 +134,11 @@ func (s *DiskIOStats) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("disk", func() plugins.Plugin { + inputs.Add("disk", func() inputs.Input { return &DiskStats{ps: &systemPS{}} }) - plugins.Add("diskio", func() plugins.Plugin { + inputs.Add("diskio", func() inputs.Input { return &DiskIOStats{ps: &systemPS{}} }) } diff --git a/plugins/system/disk_test.go b/plugins/inputs/system/disk_test.go similarity index 100% rename from plugins/system/disk_test.go rename to plugins/inputs/system/disk_test.go diff --git a/plugins/system/docker.go 
b/plugins/inputs/system/docker.go similarity index 94% rename from plugins/system/docker.go rename to plugins/inputs/system/docker.go index 2d6146a59..3a77fad5f 100644 --- a/plugins/system/docker.go +++ b/plugins/inputs/system/docker.go @@ -5,7 +5,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type DockerStats struct { @@ -18,7 +18,7 @@ func (_ *DockerStats) Description() string { func (_ *DockerStats) SampleConfig() string { return "" } -func (s *DockerStats) Gather(acc plugins.Accumulator) error { +func (s *DockerStats) Gather(acc inputs.Accumulator) error { containers, err := s.ps.DockerStat() if err != nil { return fmt.Errorf("error getting docker info: %s", err) @@ -83,7 +83,7 @@ func (s *DockerStats) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("docker", func() plugins.Plugin { + inputs.Add("docker", func() inputs.Input { return &DockerStats{ps: &systemPS{}} }) } diff --git a/plugins/system/docker_test.go b/plugins/inputs/system/docker_test.go similarity index 100% rename from plugins/system/docker_test.go rename to plugins/inputs/system/docker_test.go diff --git a/plugins/system/memory.go b/plugins/inputs/system/memory.go similarity index 85% rename from plugins/system/memory.go rename to plugins/inputs/system/memory.go index 23ce94608..f58a8cd92 100644 --- a/plugins/system/memory.go +++ b/plugins/inputs/system/memory.go @@ -3,7 +3,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type MemStats struct { @@ -16,7 +16,7 @@ func (_ *MemStats) Description() string { func (_ *MemStats) SampleConfig() string { return "" } -func (s *MemStats) Gather(acc plugins.Accumulator) error { +func (s *MemStats) Gather(acc inputs.Accumulator) error { vm, err := s.ps.VMStat() if err != nil { return fmt.Errorf("error getting virtual memory info: %s", err) @@ -47,7 +47,7 @@ func (_ *SwapStats) 
Description() string { func (_ *SwapStats) SampleConfig() string { return "" } -func (s *SwapStats) Gather(acc plugins.Accumulator) error { +func (s *SwapStats) Gather(acc inputs.Accumulator) error { swap, err := s.ps.SwapStat() if err != nil { return fmt.Errorf("error getting swap memory info: %s", err) @@ -67,11 +67,11 @@ func (s *SwapStats) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("mem", func() plugins.Plugin { + inputs.Add("mem", func() inputs.Input { return &MemStats{ps: &systemPS{}} }) - plugins.Add("swap", func() plugins.Plugin { + inputs.Add("swap", func() inputs.Input { return &SwapStats{ps: &systemPS{}} }) } diff --git a/plugins/system/memory_test.go b/plugins/inputs/system/memory_test.go similarity index 100% rename from plugins/system/memory_test.go rename to plugins/inputs/system/memory_test.go diff --git a/plugins/system/mock_PS.go b/plugins/inputs/system/mock_PS.go similarity index 100% rename from plugins/system/mock_PS.go rename to plugins/inputs/system/mock_PS.go diff --git a/plugins/system/net.go b/plugins/inputs/system/net.go similarity index 93% rename from plugins/system/net.go rename to plugins/inputs/system/net.go index 72c450222..95df7a741 100644 --- a/plugins/system/net.go +++ b/plugins/inputs/system/net.go @@ -5,7 +5,7 @@ import ( "net" "strings" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type NetIOStats struct { @@ -31,7 +31,7 @@ func (_ *NetIOStats) SampleConfig() string { return netSampleConfig } -func (s *NetIOStats) Gather(acc plugins.Accumulator) error { +func (s *NetIOStats) Gather(acc inputs.Accumulator) error { netio, err := s.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %s", err) @@ -100,7 +100,7 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("net", func() plugins.Plugin { + inputs.Add("net", func() inputs.Input { return &NetIOStats{ps: &systemPS{}} }) } diff --git 
a/plugins/system/net_test.go b/plugins/inputs/system/net_test.go similarity index 100% rename from plugins/system/net_test.go rename to plugins/inputs/system/net_test.go diff --git a/plugins/system/netstat.go b/plugins/inputs/system/netstat.go similarity index 90% rename from plugins/system/netstat.go rename to plugins/inputs/system/netstat.go index bd28971bc..71f2a0da6 100644 --- a/plugins/system/netstat.go +++ b/plugins/inputs/system/netstat.go @@ -4,7 +4,7 @@ import ( "fmt" "syscall" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type NetStats struct { @@ -21,7 +21,7 @@ func (_ *NetStats) SampleConfig() string { return tcpstatSampleConfig } -func (s *NetStats) Gather(acc plugins.Accumulator) error { +func (s *NetStats) Gather(acc inputs.Accumulator) error { netconns, err := s.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %s", err) @@ -64,7 +64,7 @@ func (s *NetStats) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("netstat", func() plugins.Plugin { + inputs.Add("netstat", func() inputs.Input { return &NetStats{ps: &systemPS{}} }) } diff --git a/plugins/system/ps.go b/plugins/inputs/system/ps.go similarity index 97% rename from plugins/system/ps.go rename to plugins/inputs/system/ps.go index d0c35c62c..966747718 100644 --- a/plugins/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" @@ -37,7 +37,7 @@ type PS interface { NetConnections() ([]net.NetConnectionStat, error) } -func add(acc plugins.Accumulator, +func add(acc inputs.Accumulator, name string, val float64, tags map[string]string) { if val >= 0 { acc.Add(name, val, tags) diff --git a/plugins/system/system.go b/plugins/inputs/system/system.go similarity index 88% rename 
from plugins/system/system.go rename to plugins/inputs/system/system.go index 1adf6c051..813ab84f5 100644 --- a/plugins/system/system.go +++ b/plugins/inputs/system/system.go @@ -8,7 +8,7 @@ import ( "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type SystemStats struct{} @@ -19,7 +19,7 @@ func (_ *SystemStats) Description() string { func (_ *SystemStats) SampleConfig() string { return "" } -func (_ *SystemStats) Gather(acc plugins.Accumulator) error { +func (_ *SystemStats) Gather(acc inputs.Accumulator) error { loadavg, err := load.LoadAvg() if err != nil { return err @@ -68,7 +68,7 @@ func format_uptime(uptime uint64) string { } func init() { - plugins.Add("system", func() plugins.Plugin { + inputs.Add("system", func() inputs.Input { return &SystemStats{} }) } diff --git a/plugins/trig/trig.go b/plugins/inputs/trig/trig.go similarity index 79% rename from plugins/trig/trig.go rename to plugins/inputs/trig/trig.go index 7ed2bf3d9..13c44e247 100644 --- a/plugins/trig/trig.go +++ b/plugins/inputs/trig/trig.go @@ -3,7 +3,7 @@ package trig import ( "math" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Trig struct { @@ -24,7 +24,7 @@ func (s *Trig) Description() string { return "Inserts sine and cosine waves for demonstration purposes" } -func (s *Trig) Gather(acc plugins.Accumulator) error { +func (s *Trig) Gather(acc inputs.Accumulator) error { sinner := math.Sin((s.x*math.Pi)/5.0) * s.Amplitude cosinner := math.Cos((s.x*math.Pi)/5.0) * s.Amplitude @@ -41,5 +41,5 @@ func (s *Trig) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("Trig", func() plugins.Plugin { return &Trig{x: 0.0} }) + inputs.Add("Trig", func() inputs.Input { return &Trig{x: 0.0} }) } diff --git a/plugins/trig/trig_test.go b/plugins/inputs/trig/trig_test.go similarity index 100% rename from plugins/trig/trig_test.go 
rename to plugins/inputs/trig/trig_test.go diff --git a/plugins/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go similarity index 93% rename from plugins/twemproxy/twemproxy.go rename to plugins/inputs/twemproxy/twemproxy.go index 268e465da..95c9d0ba0 100644 --- a/plugins/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -7,7 +7,7 @@ import ( "net" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Twemproxy struct { @@ -31,7 +31,7 @@ func (t *Twemproxy) Description() string { } // Gather data from all Twemproxy instances -func (t *Twemproxy) Gather(acc plugins.Accumulator) error { +func (t *Twemproxy) Gather(acc inputs.Accumulator) error { conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second) if err != nil { return err @@ -55,7 +55,7 @@ func (t *Twemproxy) Gather(acc plugins.Accumulator) error { // Process Twemproxy server stats func (t *Twemproxy) processStat( - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, data map[string]interface{}, ) { @@ -89,7 +89,7 @@ func (t *Twemproxy) processStat( // Process pool data in Twemproxy stats func (t *Twemproxy) processPool( - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, data map[string]interface{}, ) { @@ -117,7 +117,7 @@ func (t *Twemproxy) processPool( // Process backend server(redis/memcached) stats func (t *Twemproxy) processServer( - acc plugins.Accumulator, + acc inputs.Accumulator, tags map[string]string, data map[string]interface{}, ) { @@ -143,7 +143,7 @@ func copyTags(tags map[string]string) map[string]string { } func init() { - plugins.Add("twemproxy", func() plugins.Plugin { + inputs.Add("twemproxy", func() inputs.Input { return &Twemproxy{} }) } diff --git a/plugins/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go similarity index 100% rename from plugins/twemproxy/twemproxy_test.go rename to plugins/inputs/twemproxy/twemproxy_test.go diff 
--git a/plugins/zfs/README.md b/plugins/inputs/zfs/README.md similarity index 100% rename from plugins/zfs/README.md rename to plugins/inputs/zfs/README.md diff --git a/plugins/zfs/zfs.go b/plugins/inputs/zfs/zfs.go similarity index 93% rename from plugins/zfs/zfs.go rename to plugins/inputs/zfs/zfs.go index 3594f670b..109b261f8 100644 --- a/plugins/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) type Zfs struct { @@ -68,7 +68,7 @@ func getTags(pools []poolInfo) map[string]string { return map[string]string{"pools": poolNames} } -func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error { +func gatherPoolStats(pool poolInfo, acc inputs.Accumulator) error { lines, err := internal.ReadLines(pool.ioFilename) if err != nil { return err @@ -101,7 +101,7 @@ func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error { return nil } -func (z *Zfs) Gather(acc plugins.Accumulator) error { +func (z *Zfs) Gather(acc inputs.Accumulator) error { kstatMetrics := z.KstatMetrics if len(kstatMetrics) == 0 { kstatMetrics = []string{"arcstats", "zfetchstats", "vdev_cache_stats"} @@ -149,7 +149,7 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error { } func init() { - plugins.Add("zfs", func() plugins.Plugin { + inputs.Add("zfs", func() inputs.Input { return &Zfs{} }) } diff --git a/plugins/zfs/zfs_test.go b/plugins/inputs/zfs/zfs_test.go similarity index 100% rename from plugins/zfs/zfs_test.go rename to plugins/inputs/zfs/zfs_test.go diff --git a/plugins/zookeeper/README.md b/plugins/inputs/zookeeper/README.md similarity index 100% rename from plugins/zookeeper/README.md rename to plugins/inputs/zookeeper/README.md diff --git a/plugins/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go similarity index 90% rename from plugins/zookeeper/zookeeper.go rename to plugins/inputs/zookeeper/zookeeper.go 
index 342bace2c..93a07840d 100644 --- a/plugins/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // Zookeeper is a zookeeper plugin @@ -40,7 +40,7 @@ func (z *Zookeeper) Description() string { } // Gather reads stats from all configured servers accumulates stats -func (z *Zookeeper) Gather(acc plugins.Accumulator) error { +func (z *Zookeeper) Gather(acc inputs.Accumulator) error { if len(z.Servers) == 0 { return nil } @@ -53,7 +53,7 @@ func (z *Zookeeper) Gather(acc plugins.Accumulator) error { return nil } -func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error { +func (z *Zookeeper) gatherServer(address string, acc inputs.Accumulator) error { _, _, err := net.SplitHostPort(address) if err != nil { address = address + ":2181" @@ -103,7 +103,7 @@ func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error } func init() { - plugins.Add("zookeeper", func() plugins.Plugin { + inputs.Add("zookeeper", func() inputs.Input { return &Zookeeper{} }) } diff --git a/plugins/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go similarity index 100% rename from plugins/zookeeper/zookeeper_test.go rename to plugins/inputs/zookeeper/zookeeper_test.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go new file mode 100644 index 000000000..8a0d24f94 --- /dev/null +++ b/plugins/outputs/all/all.go @@ -0,0 +1,16 @@ +package all + +import ( + _ "github.com/influxdb/telegraf/plugins/outputs/amon" + _ "github.com/influxdb/telegraf/plugins/outputs/amqp" + _ "github.com/influxdb/telegraf/plugins/outputs/datadog" + _ "github.com/influxdb/telegraf/plugins/outputs/influxdb" + _ "github.com/influxdb/telegraf/plugins/outputs/kafka" + _ "github.com/influxdb/telegraf/plugins/outputs/kinesis" + _ "github.com/influxdb/telegraf/plugins/outputs/librato" + _ 
"github.com/influxdb/telegraf/plugins/outputs/mqtt" + _ "github.com/influxdb/telegraf/plugins/outputs/nsq" + _ "github.com/influxdb/telegraf/plugins/outputs/opentsdb" + _ "github.com/influxdb/telegraf/plugins/outputs/prometheus_client" + _ "github.com/influxdb/telegraf/plugins/outputs/riemann" +) diff --git a/outputs/amon/README.md b/plugins/outputs/amon/README.md similarity index 100% rename from outputs/amon/README.md rename to plugins/outputs/amon/README.md diff --git a/outputs/amon/amon.go b/plugins/outputs/amon/amon.go similarity index 98% rename from outputs/amon/amon.go rename to plugins/outputs/amon/amon.go index 84914a27c..d8fceb035 100644 --- a/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -10,7 +10,7 @@ import ( "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type Amon struct { diff --git a/outputs/amon/amon_test.go b/plugins/outputs/amon/amon_test.go similarity index 100% rename from outputs/amon/amon_test.go rename to plugins/outputs/amon/amon_test.go diff --git a/outputs/amqp/README.md b/plugins/outputs/amqp/README.md similarity index 100% rename from outputs/amqp/README.md rename to plugins/outputs/amqp/README.md diff --git a/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go similarity index 98% rename from outputs/amqp/amqp.go rename to plugins/outputs/amqp/amqp.go index a5c8c5a9f..6f0e0fde3 100644 --- a/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" "github.com/streadway/amqp" ) diff --git a/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go similarity index 100% rename from outputs/amqp/amqp_test.go rename to plugins/outputs/amqp/amqp_test.go diff --git a/outputs/datadog/README.md b/plugins/outputs/datadog/README.md similarity 
index 100% rename from outputs/datadog/README.md rename to plugins/outputs/datadog/README.md diff --git a/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go similarity index 98% rename from outputs/datadog/datadog.go rename to plugins/outputs/datadog/datadog.go index e654f9780..4231b1f28 100644 --- a/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -12,7 +12,7 @@ import ( "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type Datadog struct { diff --git a/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go similarity index 100% rename from outputs/datadog/datadog_test.go rename to plugins/outputs/datadog/datadog_test.go diff --git a/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md similarity index 100% rename from outputs/influxdb/README.md rename to plugins/outputs/influxdb/README.md diff --git a/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go similarity index 98% rename from outputs/influxdb/influxdb.go rename to plugins/outputs/influxdb/influxdb.go index 14391884d..f6b79b009 100644 --- a/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -11,7 +11,7 @@ import ( "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type InfluxDB struct { diff --git a/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go similarity index 100% rename from outputs/influxdb/influxdb_test.go rename to plugins/outputs/influxdb/influxdb_test.go diff --git a/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go similarity index 97% rename from outputs/kafka/kafka.go rename to plugins/outputs/kafka/kafka.go index fae955210..8e53cc511 100644 --- a/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ 
-6,7 +6,7 @@ import ( "github.com/Shopify/sarama" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type Kafka struct { diff --git a/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go similarity index 100% rename from outputs/kafka/kafka_test.go rename to plugins/outputs/kafka/kafka_test.go diff --git a/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md similarity index 100% rename from outputs/kinesis/README.md rename to plugins/outputs/kinesis/README.md diff --git a/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go similarity index 98% rename from outputs/kinesis/kinesis.go rename to plugins/outputs/kinesis/kinesis.go index 144131707..11e26fdf9 100644 --- a/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -16,7 +16,7 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type KinesisOutput struct { diff --git a/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go similarity index 100% rename from outputs/kinesis/kinesis_test.go rename to plugins/outputs/kinesis/kinesis_test.go diff --git a/outputs/librato/README.md b/plugins/outputs/librato/README.md similarity index 100% rename from outputs/librato/README.md rename to plugins/outputs/librato/README.md diff --git a/outputs/librato/librato.go b/plugins/outputs/librato/librato.go similarity index 98% rename from outputs/librato/librato.go rename to plugins/outputs/librato/librato.go index a653ce196..75aecb756 100644 --- a/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -9,7 +9,7 @@ import ( "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type Librato struct { diff --git 
a/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go similarity index 100% rename from outputs/librato/librato_test.go rename to plugins/outputs/librato/librato_test.go diff --git a/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go similarity index 98% rename from outputs/mqtt/mqtt.go rename to plugins/outputs/mqtt/mqtt.go index aa6e17bc7..a70b2e575 100644 --- a/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -12,7 +12,7 @@ import ( paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) const MaxClientIdLen = 8 diff --git a/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go similarity index 100% rename from outputs/mqtt/mqtt_test.go rename to plugins/outputs/mqtt/mqtt_test.go diff --git a/outputs/nsq/README.md b/plugins/outputs/nsq/README.md similarity index 100% rename from outputs/nsq/README.md rename to plugins/outputs/nsq/README.md diff --git a/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go similarity index 96% rename from outputs/nsq/nsq.go rename to plugins/outputs/nsq/nsq.go index 65b139c77..db58670a2 100644 --- a/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -3,7 +3,7 @@ package nsq import ( "fmt" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" "github.com/nsqio/go-nsq" ) diff --git a/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go similarity index 100% rename from outputs/nsq/nsq_test.go rename to plugins/outputs/nsq/nsq_test.go diff --git a/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md similarity index 100% rename from outputs/opentsdb/README.md rename to plugins/outputs/opentsdb/README.md diff --git a/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go similarity index 98% rename from 
outputs/opentsdb/opentsdb.go rename to plugins/outputs/opentsdb/opentsdb.go index 0146458dc..236385d71 100644 --- a/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type OpenTSDB struct { diff --git a/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go similarity index 100% rename from outputs/opentsdb/opentsdb_test.go rename to plugins/outputs/opentsdb/opentsdb_test.go diff --git a/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md similarity index 100% rename from outputs/prometheus_client/README.md rename to plugins/outputs/prometheus_client/README.md diff --git a/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go similarity index 98% rename from outputs/prometheus_client/prometheus_client.go rename to plugins/outputs/prometheus_client/prometheus_client.go index 6df69a0a4..1fbf9056a 100644 --- a/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -6,7 +6,7 @@ import ( "net/http" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" ) diff --git a/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go similarity index 97% rename from outputs/prometheus_client/prometheus_client_test.go rename to plugins/outputs/prometheus_client/prometheus_client_test.go index 53adcac17..dc353486c 100644 --- a/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdb/influxdb/client/v2" - 
"github.com/influxdb/telegraf/plugins/prometheus" + "github.com/influxdb/telegraf/plugins/inputs/prometheus" "github.com/influxdb/telegraf/testutil" ) diff --git a/outputs/registry.go b/plugins/outputs/registry.go similarity index 100% rename from outputs/registry.go rename to plugins/outputs/registry.go diff --git a/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go similarity index 97% rename from outputs/riemann/riemann.go rename to plugins/outputs/riemann/riemann.go index eaa0aab9c..afbde0051 100644 --- a/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -7,7 +7,7 @@ import ( "github.com/amir/raidman" "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/outputs" + "github.com/influxdb/telegraf/plugins/outputs" ) type Riemann struct { diff --git a/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go similarity index 100% rename from outputs/riemann/riemann_test.go rename to plugins/outputs/riemann/riemann_test.go diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index f00ac7d00..96319bf72 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -66,6 +66,6 @@ exit_if_fail "./telegraf -version | grep $VERSION" tmpdir=$(mktemp -d) ./telegraf -sample-config > $tmpdir/config.toml exit_if_fail ./telegraf -config $tmpdir/config.toml \ - -test -filter cpu:mem + -test -input-filter cpu:mem exit $rc diff --git a/scripts/init.sh b/scripts/init.sh index 91e9b47b3..8a0fd992c 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -137,9 +137,9 @@ case $1 in log_success_msg "Starting the process" "$name" if which start-stop-daemon > /dev/null 2>&1; then - start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & + start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config 
-config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & else - nohup $daemon -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & + nohup $daemon -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & fi log_success_msg "$name process was started" ;; diff --git a/scripts/telegraf.service b/scripts/telegraf.service index d5e46b124..62fc23a25 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -6,7 +6,7 @@ After=network.target [Service] EnvironmentFile=-/etc/default/telegraf User=telegraf -ExecStart=/opt/telegraf/telegraf -config /etc/opt/telegraf/telegraf.conf -configdirectory /etc/opt/telegraf/telegraf.d $TELEGRAF_OPTS +ExecStart=/opt/telegraf/telegraf -config /etc/opt/telegraf/telegraf.conf -config-directory /etc/opt/telegraf/telegraf.d $TELEGRAF_OPTS Restart=on-failure KillMode=process From 7499c1f969608e30a94b4841bedb91339a186231 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 15:21:10 -0700 Subject: [PATCH 047/103] 0.3.0: update README and documentation --- CHANGELOG.md | 7 ++- CONFIGURATION.md | 48 ++++++++++------ CONTRIBUTING.md | 68 ++++++++--------------- README.md | 115 ++++++++++++++++++++------------------- cmd/telegraf/telegraf.go | 62 ++++++++++++++------- 5 files changed, 161 insertions(+), 139 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9856b5cf..310051ea1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,11 @@ directory. - **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration. - **breaking change** `twemproxy` plugin: `prefix` option removed. -- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` instead of -only `cpu_` +- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_` +instead of only `cpu_` +- **breaking change** some command-line flags have been renamed to separate words. 
+`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`, +`-outputfilter` -> `-output-filter` - The prometheus plugin schema has not been changed (measurements have not been aggregated). diff --git a/CONFIGURATION.md b/CONFIGURATION.md index fc822d461..ef91f0a06 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -1,6 +1,6 @@ # Telegraf Configuration -## Generating a config file +## Generating a Configuration File A default Telegraf config file can be generated using the `-sample-config` flag, like this: `telegraf -sample-config` @@ -9,7 +9,20 @@ To generate a file with specific inputs and outputs, you can use the `-input-filter` and `-output-filter` flags, like this: `telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` -## Plugin Configuration +## Telegraf Agent Configuration + +Telegraf has a few options you can configure under the `agent` section of the +config. + +* **hostname**: The hostname is passed as a tag. By default this will be +the value returned by `hostname` on the machine running Telegraf. +You can override that value here. +* **interval**: How often to gather metrics. Uses a simple number + +unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes. +* **debug**: Set to true to gather and send metrics to STDOUT as well as +InfluxDB. + +## Input Configuration There are some configuration options that are configurable per plugin: @@ -22,7 +35,7 @@ There are some configuration options that are configurable per plugin: global interval, but if one particular plugin should be run less or more often, you can configure that here. -### Plugin Filters +### Input Filters There are also filters that can be configured per plugin: @@ -36,7 +49,7 @@ match against the tag name, and if it matches the measurement is emitted. * **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted. This is tested on measurements that have passed the tagpass test. 
-### Plugin Configuration Examples +### Input Configuration Examples This is a full working config that will output CPU data to an InfluxDB instance at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output @@ -57,8 +70,8 @@ fields which begin with `time_`. database = "telegraf" # required. precision = "s" -# PLUGINS -[plugins] +# INPUTS +[inputs] [[inputs.cpu]] percpu = true totalcpu = false @@ -66,10 +79,10 @@ fields which begin with `time_`. drop = ["time_*"] ``` -### Plugin Config: tagpass and tagdrop +### Input Config: tagpass and tagdrop ```toml -[plugins] +[inputs] [[inputs.cpu]] percpu = true totalcpu = false @@ -88,7 +101,7 @@ fields which begin with `time_`. path = [ "/opt", "/home*" ] ``` -### Plugin Config: pass and drop +### Input Config: pass and drop ```toml # Drop all metrics for guest & steal CPU usage @@ -102,7 +115,7 @@ fields which begin with `time_`. pass = ["inodes*"] ``` -### Plugin config: prefix, suffix, and override +### Input config: prefix, suffix, and override This plugin will emit measurements with the name `cpu_total` @@ -122,7 +135,7 @@ This will emit measurements with the name `foobar` totalcpu = true ``` -### Plugin config: tags +### Input config: tags This plugin will emit measurements with two additional tags: `tag1=foo` and `tag2=bar` @@ -136,10 +149,12 @@ This plugin will emit measurements with two additional tags: `tag1=foo` and tag2 = "bar" ``` -### Multiple plugins of the same type +### Multiple inputs of the same type -Additional plugins (or outputs) of the same type can be specified, -just define more instances in the config file: +Additional inputs (or outputs) of the same type can be specified, +just define more instances in the config file. 
It is highly recommended that +you utilize `name_override`, `name_prefix`, or `name_suffix` config options +to avoid measurement collisions: ```toml [[inputs.cpu]] @@ -149,6 +164,7 @@ just define more instances in the config file: [[inputs.cpu]] percpu = true totalcpu = false + name_override = "percpu_usage" drop = ["cpu_time*"] ``` @@ -158,8 +174,8 @@ Telegraf also supports specifying multiple output sinks to send data to, configuring each output sink is different, but examples can be found by running `telegraf -sample-config`. -Outputs also support the same configurable options as plugins -(pass, drop, tagpass, tagdrop), added in 0.2.4 +Outputs also support the same configurable options as inputs +(pass, drop, tagpass, tagdrop) ```toml [[outputs.influxdb]] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 12dcad1d0..a47ad2f17 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,7 @@ Before we can merge a pull request, you will need to sign the CLA, which can be found [on our website](http://influxdb.com/community/cla.html) -## Plugins +## Input Plugins This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows for operators to @@ -13,23 +13,21 @@ to create new ways of generating metrics. Plugin authorship is kept as simple as possible to promote people to develop and submit new inputs. -### Plugin Guidelines +### Input Plugin Guidelines * A plugin must conform to the `inputs.Input` interface. -* Each generated metric automatically has the name of the plugin that generated -it prepended. This is to keep plugins honest. -* Plugins should call `inputs.Add` in their `init` function to register themselves. +* Input Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example. 
-* To be available within Telegraf itself, plugins must add themselves to the +* Input Plugins must be added to the `github.com/influxdb/telegraf/plugins/inputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this plugin does. -### Plugin interface +### Input interface ```go -type Plugin interface { +type Input interface { SampleConfig() string Description() string Gather(Accumulator) error @@ -52,45 +50,25 @@ type Accumulator interface { The way that a plugin emits metrics is by interacting with the Accumulator. The `Add` function takes 3 arguments: -* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`. +* **measurement**: A string description of the metric. For instance `bytes_read` or ` +faults`. * **value**: A value for the metric. This accepts 5 different types of value: * **int**: The most common type. All int types are accepted but favor using `int64` Useful for counters, etc. * **float**: Favor `float64`, useful for gauges, percentages, etc. - * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc. - * **string**: Typically used to indicate a message, or some kind of freeform information. - * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`. + * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, + etc. + * **string**: Typically used to indicate a message, or some kind of freeform + information. + * **time.Time**: Useful for indicating when a state last occurred, for instance ` + light_on_since`. * **tags**: This is a map of strings to strings to describe the where or who about the metric. For instance, the `net` plugin adds a tag named `"interface"` set to the name of the network interface, like `"eth0"`. 
-The `AddFieldsWithTime` allows multiple values for a point to be passed. The values -used are the same type profile as **value** above. The **timestamp** argument -allows a point to be registered as having occurred at an arbitrary time. - Let's say you've written a plugin that emits metrics about processes on the current host. -```go - -type Process struct { - CPUTime float64 - MemoryBytes int64 - PID int -} - -func Gather(acc inputs.Accumulator) error { - for _, process := range system.Processes() { - tags := map[string]string { - "pid": fmt.Sprintf("%d", process.Pid), - } - - acc.Add("cpu", process.CPUTime, tags, time.Now()) - acc.Add("memory", process.MemoryBytes, tags, time.Now()) - } -} -``` - -### Plugin Example +### Input Plugin Example ```go package simple @@ -126,15 +104,15 @@ func init() { } ``` -## Service Plugins +## Service Input Plugins This section is for developers who want to create new "service" collection inputs. A service plugin differs from a regular plugin in that it operates a background service while Telegraf is running. One example would be the `statsd` plugin, which operates a statsd server. -Service Plugins are substantially more complicated than a regular plugin, as they -will require threads and locks to verify data integrity. Service Plugins should +Service Input Plugins are substantially more complicated than a regular plugin, as they +will require threads and locks to verify data integrity. Service Input Plugins should be avoided unless there is no way to create their behavior with a regular plugin. Their interface is quite similar to a regular plugin, with the addition of `Start()` @@ -157,13 +135,13 @@ type ServicePlugin interface { } ``` -## Outputs +## Output Plugins This section is for developers who want to create a new output sink. Outputs are created in a similar manner as collection plugins, and their interface has similar constructs. 
-### Output Guidelines +### Output Plugin Guidelines * An output must conform to the `outputs.Output` interface. * Outputs should call `outputs.Add` in their `init` function to register themselves. @@ -230,7 +208,7 @@ func init() { ``` -## Service Outputs +## Service Output Plugins This section is for developers who want to create new "service" output. A service output differs from a regular output in that it operates a background service @@ -243,7 +221,7 @@ and `Stop()` methods. ### Service Output Guidelines * Same as the `Output` guidelines, except that they must conform to the -`inputs.ServiceOutput` interface. +`output.ServiceOutput` interface. ### Service Output interface @@ -274,7 +252,7 @@ which would take some time to replicate. To overcome this situation we've decided to use docker containers to provide a fast and reproducible environment to test those services which require it. For other situations -(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go ) +(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go) a simple mock will suffice. To execute Telegraf tests follow these simple steps: diff --git a/README.md b/README.md index 46f865951..e8a7cc6a3 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,35 @@ -# Telegraf - A native agent for InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) +# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) Telegraf is an agent written in Go for collecting metrics from the system it's -running on, or from other services, and writing them into InfluxDB. +running on, or from other services, and writing them into InfluxDB or other +[outputs](https://github.com/influxdata/telegraf#supported-output-plugins). 
Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics from well known services (like Hadoop, Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). -We'll eagerly accept pull requests for new plugins and will manage the set of -plugins that Telegraf supports. See the -[contributing guide](CONTRIBUTING.md) for instructions on -writing new inputs. +New input and output plugins are designed to be easy to contribute, +we'll eagerly accept pull +requests and will manage the set of plugins that Telegraf supports. +See the [contributing guide](CONTRIBUTING.md) for instructions on writing +new plugins. ## Installation: +NOTE: Telegraf 0.3.x is **not** backwards-compatible with previous versions of +telegraf, both in the database layout and the configuration file. 0.2.x will +continue to be supported, see below for download links. + +TODO: link to blog post about 0.3.x changes. + ### Linux deb and rpm packages: Latest: +* http://get.influxdb.org/telegraf/telegraf_0.3.0_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.3.0-1.x86_64.rpm + +0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.2.4-1.x86_64.rpm @@ -33,6 +45,11 @@ controlled via `systemctl [action] telegraf` ### Linux binaries: Latest: +* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.3.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_386_0.3.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.3.0.tar.gz + +0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_386_0.2.4.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_arm_0.2.4.tar.gz @@ -51,32 +68,6 @@ brew update brew install telegraf ``` -### Version 0.3.0 Beta - -Version 0.3.0 will introduce many new breaking changes to Telegraf. 
For starters, -plugin measurements will be aggregated into fields. This means that there will no -longer be a `cpu_usage_idle` measurement, there will be a `cpu` measurement with -a `usage_idle` field. - -There will also be config file changes, meaning that your 0.2.x Telegraf config -files will no longer work properly. It is recommended that you use the -`-sample-config` flag to generate a new config file to see what the changes are. -You can also read the -[0.3.0 configuration guide](https://github.com/influxdb/telegraf/blob/0.3.0/CONFIGURATION.md) -to see some of the new features and options available. - -You can read more about the justifications for the aggregated measurements -[here](https://github.com/influxdb/telegraf/issues/152), and a more detailed -breakdown of the work [here](https://github.com/influxdb/telegraf/pull/437). -Once we're closer to a full release, there will be a detailed blog post -explaining all the changes. - -* http://get.influxdb.org/telegraf/telegraf_0.3.0-beta2_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.3.0_beta2-1.x86_64.rpm -* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.3.0-beta2.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_386_0.3.0-beta2.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.3.0-beta2.tar.gz - ### From Source: Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm), @@ -91,37 +82,49 @@ if you don't have it already. You also must build with golang version 1.4+. ### How to use it: -* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration. -* Or run `telegraf -sample-config -input-filter cpu:mem -output-filter influxdb > telegraf.conf`. -to create a config file with only CPU and memory plugins defined, and InfluxDB -output defined. -* Edit the configuration to match your needs. -* Run `telegraf -config telegraf.conf -test` to output one full measurement -sample to STDOUT. 
NOTE: you may want to run as the telegraf user if you are using -the linux packages `sudo -u telegraf telegraf -config telegraf.conf -test` -* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs. -* Run `telegraf -config telegraf.conf -input-filter system:swap`. -to run telegraf with only the system & swap plugins defined in the config. +```console +$ telegraf -help +Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB -## Telegraf Options +Usage: -Telegraf has a few options you can configure under the `agent` section of the -config. + telegraf -* **hostname**: The hostname is passed as a tag. By default this will be -the value returned by `hostname` on the machine running Telegraf. -You can override that value here. -* **interval**: How often to gather metrics. Uses a simple number + -unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes. -* **debug**: Set to true to gather and send metrics to STDOUT as well as -InfluxDB. 
+The flags are: + + -config configuration file to load + -test gather metrics once, print them to stdout, and exit + -sample-config print out full sample configuration to stdout + -config-directory directory containing additional *.conf files + -input-filter filter the input plugins to enable, separator is : + -output-filter filter the output plugins to enable, separator is : + -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -version print the version to stdout + +Examples: + + # generate a telegraf config file: + telegraf -sample-config > telegraf.conf + + # generate config with only cpu input & influxdb output plugins defined + telegraf -sample-config -input-filter cpu -output-filter influxdb + + # run a single telegraf collection, outputing metrics to stdout + telegraf -config telegraf.conf -test + + # run telegraf with all plugins defined in config file + telegraf -config telegraf.conf + + # run telegraf, enabling the cpu & memory input, and influxdb output plugins + telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb +``` ## Configuration See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced configuration options. -## Supported Plugins +## Supported Input Plugins **You can view usage instructions for each plugin by running** `telegraf -usage `. @@ -166,7 +169,7 @@ Telegraf currently has support for collecting metrics from: * diskio * swap -## Supported Service Plugins +## Supported Input Service Plugins Telegraf can collect metrics via the following services: @@ -176,7 +179,7 @@ Telegraf can collect metrics via the following services: We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. 
-## Supported Outputs +## Supported Output Plugins * influxdb * nsq diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index cc54dbc88..21e89ce04 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -31,43 +31,50 @@ var fOutputFilters = flag.String("output-filter", "", var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") +var fInputFiltersLegacy = flag.String("filter", "", + "filter the plugins to enable, separator is :") +var fOutputFiltersLegacy = flag.String("outputfilter", "", + "filter the outputs to enable, separator is :") +var fConfigDirectoryLegacy = flag.String("configdirectory", "", + "directory containing additional *.conf files") + // Telegraf version // -ldflags "-X main.Version=`git describe --always --tags`" var Version string -const usage = `Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB +const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics. 
Usage: - telegraf + telegraf The flags are: - -config configuration file to load - -test gather metrics once, print them to stdout, and exit - -sample-config print out full sample configuration to stdout - -config-directory directory containing additional *.conf files - -input-filter filter the input plugins to enable, separator is : - -output-filter filter the output plugins to enable, separator is : - -usage print usage for a plugin, ie, 'telegraf -usage mysql' - -version print the version to stdout + -config configuration file to load + -test gather metrics once, print them to stdout, and exit + -sample-config print out full sample configuration to stdout + -config-directory directory containing additional *.conf files + -input-filter filter the input plugins to enable, separator is : + -output-filter filter the output plugins to enable, separator is : + -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -version print the version to stdout Examples: - # generate a telegraf config file: - telegraf -sample-config > telegraf.conf + # generate a telegraf config file: + telegraf -sample-config > telegraf.conf - # generate a telegraf config file with only cpu input and influxdb output enabled - telegraf -sample-config -input-filter cpu -output-filter influxdb + # generate config with only cpu input & influxdb output plugins defined + telegraf -sample-config -input-filter cpu -output-filter influxdb - # run a single telegraf collection, outputting metrics to stdout - telegraf -config telegraf.conf -test + # run a single telegraf collection, outputing metrics to stdout + telegraf -config telegraf.conf -test - # run telegraf with all plugins defined in config file - telegraf -config telegraf.conf + # run telegraf with all plugins defined in config file + telegraf -config telegraf.conf - # run telegraf, enabling only the cpu and memory inputs and influxdb output - telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb + # run telegraf, 
enabling the cpu & memory input, and influxdb output plugins + telegraf -config telegraf.conf -input-filter cpu:mem -output-filter influxdb ` func main() { @@ -79,12 +86,20 @@ func main() { } var inputFilters []string + if *fInputFiltersLegacy != "" { + inputFilter := strings.TrimSpace(*fInputFiltersLegacy) + inputFilters = strings.Split(":"+inputFilter+":", ":") + } if *fInputFilters != "" { inputFilter := strings.TrimSpace(*fInputFilters) inputFilters = strings.Split(":"+inputFilter+":", ":") } var outputFilters []string + if *fOutputFiltersLegacy != "" { + outputFilter := strings.TrimSpace(*fOutputFiltersLegacy) + outputFilters = strings.Split(":"+outputFilter+":", ":") + } if *fOutputFilters != "" { outputFilter := strings.TrimSpace(*fOutputFilters) outputFilters = strings.Split(":"+outputFilter+":", ":") @@ -129,6 +144,13 @@ func main() { return } + if *fConfigDirectoryLegacy != "" { + err = c.LoadDirectory(*fConfigDirectoryLegacy) + if err != nil { + log.Fatal(err) + } + } + if *fConfigDirectory != "" { err = c.LoadDirectory(*fConfigDirectory) if err != nil { From 6496d185ab7f6fd564893825717c9c26435ea9a7 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 8 Jan 2016 12:49:50 -0700 Subject: [PATCH 048/103] add backwards-compatability for 'plugins', remove [inputs] and [outputs] headers --- CHANGELOG.md | 2 +- CONFIGURATION.md | 17 +++++++---------- etc/telegraf.conf | 4 ---- internal/config/config.go | 4 +--- internal/config/testdata/telegraf-agent.toml | 4 ---- 5 files changed, 9 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 310051ea1..dbb364bac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ be "aggregator plugins" and "filter plugins" in the future. Additionally, `inputs/` and `outputs/` directories have been placed in the root-level `plugins/` directory. - **breaking change** the `io` plugin has been renamed `diskio` -- **breaking change** Plugin measurements aggregated into a single measurement. 
+- **breaking change** plugin measurements aggregated into a single measurement. - **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters for configuration. - **breaking change** `twemproxy` plugin: `prefix` option removed. diff --git a/CONFIGURATION.md b/CONFIGURATION.md index ef91f0a06..ee79da98a 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -24,27 +24,27 @@ InfluxDB. ## Input Configuration -There are some configuration options that are configurable per plugin: +There are some configuration options that are configurable per input: * **name_override**: Override the base name of the measurement. -(Default is the name of the plugin). +(Default is the name of the input). * **name_prefix**: Specifies a prefix to attach to the measurement name. * **name_suffix**: Specifies a suffix to attach to the measurement name. -* **tags**: A map of tags to apply to a specific plugin's measurements. +* **tags**: A map of tags to apply to a specific input's measurements. * **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular plugin should be run less or more often, +global interval, but if one particular input should be run less or more often, you can configure that here. ### Input Filters -There are also filters that can be configured per plugin: +There are also filters that can be configured per input: * **pass**: An array of strings that is used to filter metrics generated by the -current plugin. Each string in the array is tested as a glob match against field names +current input. Each string in the array is tested as a glob match against field names and if it matches, the field is emitted. * **drop**: The inverse of pass, if a field name matches, it is not emitted. * **tagpass**: tag names and arrays of strings that are used to filter -measurements by the current plugin. Each string in the array is tested as a glob +measurements by the current input. 
Each string in the array is tested as a glob match against the tag name, and if it matches the measurement is emitted. * **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted. This is tested on measurements that have passed the tagpass test. @@ -64,14 +64,12 @@ fields which begin with `time_`. interval = "10s" # OUTPUTS -[outputs] [[outputs.influxdb]] url = "http://192.168.59.103:8086" # required. database = "telegraf" # required. precision = "s" # INPUTS -[inputs] [[inputs.cpu]] percpu = true totalcpu = false @@ -82,7 +80,6 @@ fields which begin with `time_`. ### Input Config: tagpass and tagdrop ```toml -[inputs] [[inputs.cpu]] percpu = true totalcpu = false diff --git a/etc/telegraf.conf b/etc/telegraf.conf index eb50005a5..9df2e93d5 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -49,8 +49,6 @@ # OUTPUTS # ############################################################################### -[outputs] - # Configuration for influxdb server to send metrics to [[outputs.influxdb]] # The full HTTP or UDP endpoint URL for your InfluxDB instance. 
@@ -79,8 +77,6 @@ # INPUTS # ############################################################################### -[inputs] - # Read metrics about cpu usage [[inputs.cpu]] # Whether to report per-cpu stats or not diff --git a/internal/config/config.go b/internal/config/config.go index e96856015..6c3d17750 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -287,7 +287,6 @@ var header = `# Telegraf configuration # OUTPUTS # ############################################################################### -[outputs] ` var pluginHeader = ` @@ -296,7 +295,6 @@ var pluginHeader = ` # INPUTS # ############################################################################### -[inputs] ` var serviceInputHeader = ` @@ -470,7 +468,7 @@ func (c *Config) LoadConfig(path string) error { pluginName) } } - case "inputs": + case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case *ast.Table: diff --git a/internal/config/testdata/telegraf-agent.toml b/internal/config/testdata/telegraf-agent.toml index 3c279db34..5ede47016 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/internal/config/testdata/telegraf-agent.toml @@ -39,8 +39,6 @@ # OUTPUTS # ############################################################################### -[outputs] - # Configuration for influxdb server to send metrics to [[outputs.influxdb]] # The full HTTP endpoint URL for your InfluxDB instance @@ -70,8 +68,6 @@ # PLUGINS # ############################################################################### -[inputs] - # Read Apache status information (mod_status) [[inputs.apache]] # An array of Apache status URI to gather stats. 
From fd6daaa73b7a6db07644b7804e848b817ae204f5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 7 Jan 2016 15:21:10 -0700 Subject: [PATCH 049/103] 0.3.0: update README and documentation --- CONFIGURATION.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CONFIGURATION.md b/CONFIGURATION.md index ee79da98a..60a18e18b 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -70,6 +70,10 @@ fields which begin with `time_`. precision = "s" # INPUTS +<<<<<<< HEAD +======= +[inputs] +>>>>>>> ac13af0... 0.3.0: update README and documentation [[inputs.cpu]] percpu = true totalcpu = false @@ -80,6 +84,10 @@ fields which begin with `time_`. ### Input Config: tagpass and tagdrop ```toml +<<<<<<< HEAD +======= +[inputs] +>>>>>>> ac13af0... 0.3.0: update README and documentation [[inputs.cpu]] percpu = true totalcpu = false From 07b4a4dbca76d68b7b49b6559c82f173a9715c96 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Fri, 8 Jan 2016 09:47:09 -0600 Subject: [PATCH 050/103] Added a `build.py` script for compiling and packaging. Added post and pre install scripts to handle installation and upgrades in a cleaner way. Minor fixes to the init script and service unit file. --- .gitignore | 2 + CONFIGURATION.md | 8 - build.py | 671 +++++++++++++++++++++++++++++++++++++++ scripts/init.sh | 11 +- scripts/post-install.sh | 69 ++++ scripts/pre-install.sh | 14 + scripts/telegraf.service | 2 +- 7 files changed, 762 insertions(+), 15 deletions(-) create mode 100755 build.py create mode 100644 scripts/post-install.sh create mode 100644 scripts/pre-install.sh diff --git a/.gitignore b/.gitignore index d432f6865..7d27d694e 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ tivan .vagrant /telegraf .idea +*~ +*# diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 60a18e18b..ee79da98a 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -70,10 +70,6 @@ fields which begin with `time_`. precision = "s" # INPUTS -<<<<<<< HEAD -======= -[inputs] ->>>>>>> ac13af0... 
0.3.0: update README and documentation [[inputs.cpu]] percpu = true totalcpu = false @@ -84,10 +80,6 @@ fields which begin with `time_`. ### Input Config: tagpass and tagdrop ```toml -<<<<<<< HEAD -======= -[inputs] ->>>>>>> ac13af0... 0.3.0: update README and documentation [[inputs.cpu]] percpu = true totalcpu = false diff --git a/build.py b/build.py new file mode 100755 index 000000000..1186a239b --- /dev/null +++ b/build.py @@ -0,0 +1,671 @@ +#!/usr/bin/env python2.7 +# +# This is the Telegraf build script. +# +# Current caveats: +# - Does not checkout the correct commit/branch (for now, you will need to do so manually) +# - Has external dependencies for packaging (fpm) and uploading (boto) +# + +import sys +import os +import subprocess +import time +import datetime +import shutil +import tempfile +import hashlib +import re + +try: + import boto + from boto.s3.key import Key +except ImportError: + pass + +# PACKAGING VARIABLES +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/telegraf" +DATA_DIR = "/var/lib/telegraf" +SCRIPT_DIR = "/usr/lib/telegraf/scripts" +CONFIG_DIR = "/etc/telegraf" +LOGROTATE_DIR = "/etc/logrotate.d" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/telegraf.service" +LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf" +DEFAULT_CONFIG = "etc/telegraf.conf" +POSTINST_SCRIPT = "scripts/post-install.sh" +PREINST_SCRIPT = "scripts/pre-install.sh" + +# META-PACKAGE VARIABLES +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/telegraf" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." 
+ +# SCRIPT START +prereqs = [ 'git', 'go' ] +optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] + +fpm_common_args = "-f -s dir --log error \ + --vendor {} \ + --url {} \ + --license {} \ + --maintainer {} \ + --config-files {} \ + --config-files {} \ + --after-install {} \ + --before-install {} \ + --description \"{}\"".format( + VENDOR, + PACKAGE_URL, + PACKAGE_LICENSE, + MAINTAINER, + CONFIG_DIR + '/telegraf.conf', + LOGROTATE_DIR + '/telegraf', + POSTINST_SCRIPT, + PREINST_SCRIPT, + DESCRIPTION) + +targets = { + 'telegraf' : './cmd/telegraf/telegraf.go', +} + +supported_builds = { + # TODO(rossmcdonald): Add support for multiple GOARM values + 'darwin': [ "amd64", "386" ], + # 'windows': [ "amd64", "386", "arm", "arm64" ], + 'linux': [ "amd64", "386", "arm" ] +} +supported_go = [ '1.5.1' ] +supported_packages = { + "darwin": [ "tar", "zip" ], + "linux": [ "deb", "rpm", "tar", "zip" ], + "windows": [ "tar", "zip" ], +} + +def run(command, allow_failure=False, shell=False): + out = None + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print "" + print "" + print "Executed command failed!" + print "-- Command run was: {}".format(command) + print "-- Failure was: {}".format(e.output) + if allow_failure: + print "Continuing..." + return None + else: + print "" + print "Stopping." + sys.exit(1) + except OSError as e: + print "" + print "" + print "Invalid command!" + print "-- Command run was: {}".format(command) + print "-- Failure was: {}".format(e) + if allow_failure: + print "Continuing..." + return out + else: + print "" + print "Stopping." 
+ sys.exit(1) + else: + return out + +def create_temp_dir(): + return tempfile.mkdtemp(prefix="telegraf-build.") + +def get_current_version(): + command = "git describe --always --tags --abbrev=0" + out = run(command) + return out.strip() + +def get_current_commit(short=False): + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def get_system_arch(): + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + return arch + +def get_system_platform(): + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + print "\nChecking environment:" + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + print "\t- {} -> {}".format(v, os.environ.get(v)) + + cwd = os.getcwd() + if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + print "\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures." + +def check_prereqs(): + print "\nChecking for dependencies:" + for req in prereqs: + print "\t- {} ->".format(req), + path = check_path_for(req) + if path: + print "{}".format(path) + else: + print "?" 
+ for req in optional_prereqs: + print "\t- {} (optional) ->".format(req), + path = check_path_for(req) + if path: + print "{}".format(path) + else: + print "?" + print "" + +def upload_packages(packages, nightly=False): + print "Uploading packages to S3..." + print "" + c = boto.connect_s3() + # TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly + bucket = c.get_bucket('telegraf-nightly') + for p in packages: + name = os.path.basename(p) + if bucket.get_key(name) is None or nightly: + print "\t - Uploading {}...".format(name), + k = Key(bucket) + k.key = name + if nightly: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + print "[ DONE ]" + else: + print "\t - Not uploading {}, already exists.".format(p) + print "" + +def run_tests(race, parallel, timeout, no_vet): + get_command = "go get -d -t ./..." + print "Retrieving Go dependencies...", + sys.stdout.flush() + run(get_command) + print "done." + print "Running tests:" + print "\tRace: ", race + if parallel is not None: + print "\tParallel:", parallel + if timeout is not None: + print "\tTimeout:", timeout + sys.stdout.flush() + p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors." + print out + print err + return False + if not no_vet: + p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print "Go vet failed. Please run 'go vet ./...' and fix any errors." + print out + print err + return False + else: + print "Skipping go vet ..." 
+ sys.stdout.flush() + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + code = os.system(test_command) + if code != 0: + print "Tests Failed" + return False + else: + print "Tests Passed" + return True + +def build(version=None, + branch=None, + commit=None, + platform=None, + arch=None, + nightly=False, + rc=None, + race=False, + clean=False, + outdir=".", + goarm_version="6"): + print "-------------------------" + print "" + print "Build plan:" + print "\t- version: {}".format(version) + if rc: + print "\t- release candidate: {}".format(rc) + print "\t- commit: {}".format(commit) + print "\t- branch: {}".format(branch) + print "\t- platform: {}".format(platform) + print "\t- arch: {}".format(arch) + if arch == 'arm' and goarm_version: + print "\t- ARM version: {}".format(goarm_version) + print "\t- nightly? {}".format(str(nightly).lower()) + print "\t- race enabled? {}".format(str(race).lower()) + print "" + + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/': + print "Cleaning build directory..." + shutil.rmtree(outdir) + os.makedirs(outdir) + + if rc: + # If a release candidate, update the version information accordingly + version = "{}rc{}".format(version, rc) + + print "Starting build..." + for b, c in targets.iteritems(): + print "\t- Building '{}'...".format(os.path.join(outdir, b)), + build_command = "" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + if arch == "arm" and goarm_version: + if goarm_version not in ["5", "6", "7", "arm64"]: + print "!! 
Invalid ARM build version: {}".format(goarm_version) + build_command += "GOARM={} ".format(goarm_version) + build_command += "go build -o {} ".format(os.path.join(outdir, b)) + if race: + build_command += "-race " + go_version = get_go_version() + if "1.4" in go_version: + build_command += "-ldflags=\"-X main.buildTime '{}' ".format(datetime.datetime.utcnow().isoformat()) + build_command += "-X main.Version {} ".format(version) + build_command += "-X main.Branch {} ".format(branch) + build_command += "-X main.Commit {}\" ".format(get_current_commit()) + else: + build_command += "-ldflags=\"-X main.buildTime='{}' ".format(datetime.datetime.utcnow().isoformat()) + build_command += "-X main.Version={} ".format(version) + build_command += "-X main.Branch={} ".format(branch) + build_command += "-X main.Commit={}\" ".format(get_current_commit()) + build_command += c + run(build_command, shell=True) + print "[ DONE ]" + print "" + +def create_dir(path): + try: + os.makedirs(path) + except OSError as e: + print e + +def rename_file(fr, to): + try: + os.rename(fr, to) + except OSError as e: + print e + # Return the original filename + return fr + else: + # Return the new filename + return to + +def copy_file(fr, to): + try: + shutil.copy(fr, to) + except OSError as e: + print e + +def create_package_fs(build_root): + print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], DATA_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + for d in dirs: + create_dir(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0755) + +def package_scripts(build_root): + print "\t- Copying scripts and sample configuration to build directory" + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + 
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644) + shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0644) + +def go_get(update=False): + get_command = None + if update: + get_command = "go get -u -f -d ./..." + else: + get_command = "go get -d ./..." + print "Retrieving Go dependencies...", + run(get_command) + print "done.\n" + +def generate_md5_from_file(path): + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() + +def build_packages(build_output, version, nightly=False, rc=None, iteration=1): + outfiles = [] + tmp_build_dir = create_temp_dir() + try: + print "-------------------------" + print "" + print "Packaging..." 
+ for p in build_output: + # Create top-level folder displaying which platform (linux, etc) + create_dir(os.path.join(tmp_build_dir, p)) + for a in build_output[p]: + current_location = build_output[p][a] + # Create second-level directory displaying the architecture (amd64, etc)p + build_root = os.path.join(tmp_build_dir, p, a) + # Create directory tree to mimic file system of package + create_dir(build_root) + create_package_fs(build_root) + # Copy in packaging and miscellaneous scripts + package_scripts(build_root) + # Copy newly-built binaries to packaging directory + for b in targets: + if p == 'windows': + b = b + '.exe' + fr = os.path.join(current_location, b) + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) + print "\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to) + copy_file(fr, to) + # Package the directory structure + for package_type in supported_packages[p]: + print "\t- Packaging directory '{}' as '{}'...".format(build_root, package_type), + name = "telegraf" + package_version = version + package_iteration = iteration + if package_type in ['zip', 'tar']: + if nightly: + name = '{}-nightly_{}_{}'.format(name, p, a) + else: + name = '{}-{}_{}_{}'.format(name, version, p, a) + if package_type == 'tar': + # Add `tar.gz` to path to reduce package size + current_location = os.path.join(current_location, name + '.tar.gz') + if rc is not None: + package_iteration = "0.rc{}".format(rc) + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + a, + package_type, + package_version, + package_iteration, + build_root, + current_location) + if package_type == "rpm": + fpm_command += "--depends coreutils " + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print "[ COULD NOT DETERMINE OUTPUT ]" + else: + # Strip nightly version (the unix epoch) from 
filename + if nightly and package_type in ['deb', 'rpm']: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + print "[ DONE ]" + # Display MD5 hash for generated package + print "\t\tMD5 = {}".format(generate_md5_from_file(outfile)) + print "" + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def print_usage(): + print "Usage: ./build.py [options]" + print "" + print "Options:" + print "\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build." + print "\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all" + print "\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6" + print "\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all" + print "\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag." + print "\t --commit= \n\t\t- Use specific commit for build (currently a NOOP)." + print "\t --branch= \n\t\t- Build from a specific branch (currently a NOOP)." + print "\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information)." + print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." + print "\t --race \n\t\t- Whether the produced build should have race detection enabled." + print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)." + print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)." + print "\t --update \n\t\t- Whether dependencies should be updated prior to building." + print "\t --test \n\t\t- Run Go tests. Will not produce a build." + print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified." 
+ print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s." + print "\t --clean \n\t\t- Clean the build output directory prior to creating build." + print "" + +def print_package_summary(packages): + print packages + +def main(): + # Command-line arguments + outdir = "build" + commit = None + target_platform = None + target_arch = None + nightly = False + race = False + branch = None + version = get_current_version() + rc = None + package = False + update = False + clean = False + upload = False + test = False + parallel = None + timeout = None + iteration = 1 + no_vet = False + goarm_version = "6" + + for arg in sys.argv[1:]: + if '--outdir' in arg: + # Output directory. If none is specified, then builds will be placed in the same directory. + output_dir = arg.split("=")[1] + if '--commit' in arg: + # Commit to build from. If none is specified, then it will build from the most recent commit. + commit = arg.split("=")[1] + if '--branch' in arg: + # Branch to build from. If none is specified, then it will build from the current branch. + branch = arg.split("=")[1] + elif '--arch' in arg: + # Target architecture. If none is specified, then it will build for the current arch. + target_arch = arg.split("=")[1] + elif '--platform' in arg: + # Target platform. If none is specified, then it will build for the current platform. + target_platform = arg.split("=")[1] + elif '--version' in arg: + # Version to assign to this build (0.9.5, etc) + version = arg.split("=")[1] + elif '--rc' in arg: + # Signifies that this is a release candidate build. + rc = arg.split("=")[1] + elif '--race' in arg: + # Signifies that race detection should be enabled. + race = True + elif '--package' in arg: + # Signifies that packages should be built. + package = True + elif '--nightly' in arg: + # Signifies that this is a nightly build. + nightly = True + elif '--update' in arg: + # Signifies that dependencies should be updated. 
+ update = True + elif '--upload' in arg: + # Signifies that the resulting packages should be uploaded to S3 + upload = True + elif '--test' in arg: + # Run tests and exit + test = True + elif '--parallel' in arg: + # Set parallel for tests. + parallel = int(arg.split("=")[1]) + elif '--timeout' in arg: + # Set timeout for tests. + timeout = arg.split("=")[1] + elif '--clean' in arg: + # Signifies that the outdir should be deleted before building + clean = True + elif '--iteration' in arg: + iteration = arg.split("=")[1] + elif '--no-vet' in arg: + no_vet = True + elif '--goarm' in arg: + # Signifies GOARM flag to pass to build command when compiling for ARM + goarm_version = arg.split("=")[1] + elif '--help' in arg: + print_usage() + return 0 + else: + print "!! Unknown argument: {}".format(arg) + print_usage() + return 1 + + if nightly: + if rc: + print "!! Cannot be both nightly and a release candidate! Stopping." + return 1 + # In order to support nightly builds on the repository, we are adding the epoch timestamp + # to the version so that version numbers are always greater than the previous nightly. + version = "{}.n{}".format(version, int(time.time())) + + # Pre-build checks + check_environ() + check_prereqs() + + if not commit: + commit = get_current_commit(short=True) + if not branch: + branch = get_current_branch() + if not target_arch: + if 'arm' in get_system_arch(): + # Prevent uname from reporting ARM arch (eg 'armv7l') + target_arch = "arm" + else: + target_arch = get_system_arch() + if not target_platform: + target_platform = get_system_platform() + if rc or nightly: + # If a release candidate or nightly, set iteration to 0 (instead of 1) + iteration = 0 + + build_output = {} + # TODO(rossmcdonald): Prepare git repo for build (checking out correct branch/commit, etc.) 
+ # prepare(branch=branch, commit=commit) + if test: + if not run_tests(race, parallel, timeout, no_vet): + return 1 + return 0 + + go_get(update=update) + + platforms = [] + single_build = True + if target_platform == 'all': + platforms = supported_builds.keys() + single_build = False + else: + platforms = [target_platform] + + for platform in platforms: + build_output.update( { platform : {} } ) + archs = [] + if target_arch == "all": + single_build = False + archs = supported_builds.get(platform) + else: + archs = [target_arch] + for arch in archs: + od = outdir + if not single_build: + od = os.path.join(outdir, platform, arch) + build(version=version, + branch=branch, + commit=commit, + platform=platform, + arch=arch, + nightly=nightly, + rc=rc, + race=race, + clean=clean, + outdir=od, + goarm_version=goarm_version) + build_output.get(platform).update( { arch : od } ) + + # Build packages + if package: + if not check_path_for("fpm"): + print "!! Cannot package without command 'fpm'. Stopping." + return 1 + packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration) + # TODO(rossmcdonald): Add nice output for print_package_summary() + # print_package_summary(packages) + # Optionally upload to S3 + if upload: + upload_packages(packages, nightly=nightly) + return 0 + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/scripts/init.sh b/scripts/init.sh index 8a0fd992c..81932bb48 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -51,7 +51,6 @@ if [ ! -f "$STDERR" ]; then mkdir -p `dirname $STDERR` fi - OPEN_FILE_LIMIT=65536 function pidofproc() { @@ -98,7 +97,7 @@ function log_success_msg() { name=telegraf # Daemon name, where is the actual executable -daemon=/opt/telegraf/telegraf +daemon=/usr/bin/telegraf # pid file for the daemon pidfile=/var/run/telegraf/telegraf.pid @@ -106,12 +105,12 @@ piddir=`dirname $pidfile` if [ ! 
-d "$piddir" ]; then mkdir -p $piddir - chown $GROUP:$USER $piddir + chown $USER:$GROUP $piddir fi # Configuration file -config=/etc/opt/telegraf/telegraf.conf -confdir=/etc/opt/telegraf/telegraf.d +config=/etc/telegraf/telegraf.conf +confdir=/etc/telegraf/telegraf.d # If the daemon is not there, then exit. [ -x $daemon ] || exit 5 @@ -137,7 +136,7 @@ case $1 in log_success_msg "Starting the process" "$name" if which start-stop-daemon > /dev/null 2>&1; then - start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & + start-stop-daemon --chuid $USER:$GROUP --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & else nohup $daemon -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & fi diff --git a/scripts/post-install.sh b/scripts/post-install.sh new file mode 100644 index 000000000..19b895808 --- /dev/null +++ b/scripts/post-install.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +BIN_DIR=/usr/bin +LOG_DIR=/var/log/telegraf +SCRIPT_DIR=/usr/lib/telegraf/scripts +LOGROTATE_DIR=/etc/logrotate.d + +function install_init { + cp -f $SCRIPT_DIR/init.sh /etc/init.d/telegraf + chmod +x /etc/init.d/telegraf +} + +function install_systemd { + cp -f $SCRIPT_DIR/telegraf.service /lib/systemd/system/telegraf.service + systemctl enable telegraf +} + +function install_update_rcd { + update-rc.d telegraf defaults +} + +function install_chkconfig { + chkconfig --add telegraf +} + +id telegraf &>/dev/null +if [[ $? -ne 0 ]]; then + useradd --system -U -M telegraf -s /bin/false -d /etc/telegraf +fi + +chown -R -L telegraf:telegraf $LOG_DIR + +# Remove legacy symlink, if it exists +if [[ -L /etc/init.d/telegraf ]]; then + rm -f /etc/init.d/telegraf +fi + +# Add defaults file, if it doesn't exist +if [[ ! 
-f /etc/default/telegraf ]]; then + touch /etc/default/telegraf +fi + +# Add .d configuration directory +if [[ ! -d /etc/telegraf/telegraf.d ]]; then + mkdir -p /etc/telegraf/telegraf.d +fi + +# Distribution-specific logic +if [[ -f /etc/redhat-release ]]; then + # RHEL-variant logic + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + install_systemd + else + # Assuming sysv + install_init + install_chkconfig + fi +elif [[ -f /etc/lsb-release ]]; then + # Debian/Ubuntu logic + which systemctl &>/dev/null + if [[ $? -eq 0 ]]; then + install_systemd + else + # Assuming sysv + install_init + install_update_rcd + fi +fi diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh new file mode 100644 index 000000000..443d6bc87 --- /dev/null +++ b/scripts/pre-install.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +if [[ -f /etc/opt/telegraf/telegraf.conf ]]; then + # Legacy configuration found + if [[ ! -d /etc/telegraf ]]; then + # New configuration does not exist, move legacy configuration to new location + echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')." 
+ mv /etc/opt/telegraf /etc/telegraf + + backup_name="telegraf.conf.$(date +%s).backup" + echo "A backup of your current configuration can be found at: /etc/telegraf/$backup_name" + cp -a /etc/telegraf/telegraf.conf /etc/telegraf/$backup_name + fi +fi diff --git a/scripts/telegraf.service b/scripts/telegraf.service index 62fc23a25..d92f3072c 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -6,7 +6,7 @@ After=network.target [Service] EnvironmentFile=-/etc/default/telegraf User=telegraf -ExecStart=/opt/telegraf/telegraf -config /etc/opt/telegraf/telegraf.conf -config-directory /etc/opt/telegraf/telegraf.d $TELEGRAF_OPTS +ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS} Restart=on-failure KillMode=process From 81fa063338bedbf62f06c3965732a171faa264a4 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Fri, 8 Jan 2016 15:34:11 -0600 Subject: [PATCH 051/103] Removed data directory entries, since Telegraf doesn't need them. 
--- build.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/build.py b/build.py index 1186a239b..d6d6bee01 100755 --- a/build.py +++ b/build.py @@ -26,7 +26,6 @@ except ImportError: # PACKAGING VARIABLES INSTALL_ROOT_DIR = "/usr/bin" LOG_DIR = "/var/log/telegraf" -DATA_DIR = "/var/lib/telegraf" SCRIPT_DIR = "/usr/lib/telegraf/scripts" CONFIG_DIR = "/etc/telegraf" LOGROTATE_DIR = "/etc/logrotate.d" @@ -362,7 +361,7 @@ def create_package_fs(build_root): print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], DATA_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] for d in dirs: create_dir(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0755) From f37f8ac815d4511dbcf7d11415fa2f712abe49df Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 8 Jan 2016 15:07:44 -0700 Subject: [PATCH 052/103] Update changelog and readme for package updates --- CHANGELOG.md | 8 ++++++++ README.md | 28 ++++++++++++---------------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbb364bac..078e600f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ ## v0.3.0 [unreleased] ### Release Notes +- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` +and configuration files are in `/etc/telegraf` - **breaking change** `plugins` have been renamed to `inputs`. This was done because `plugins` is too generic, as there are now also "output plugins", and will likely be "aggregator plugins" and "filter plugins" in the future. Additionally, @@ -19,6 +21,12 @@ instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). 
+### Packaging change note: + +RHEL/CentOS users upgrading from 0.2 to 0.3 will probably have their +configurations overwritten by the upgrade. There is a backup stored at +/etc/telegraf/telegraf.conf.$(date +%s).backup. + ### Features - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags diff --git a/README.md b/README.md index e8a7cc6a3..e86750f18 100644 --- a/README.md +++ b/README.md @@ -35,8 +35,8 @@ Latest: ##### Package instructions: -* Telegraf binary is installed in `/opt/telegraf/telegraf` -* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf` +* Telegraf binary is installed in `/usr/bin/telegraf` +* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf` * On sysv systems, the telegraf daemon can be controlled via `service telegraf [action]` * On systemd systems (such as Ubuntu 15+), the telegraf daemon can be @@ -126,9 +126,6 @@ configuration options. ## Supported Input Plugins -**You can view usage instructions for each plugin by running** -`telegraf -usage `. - Telegraf currently has support for collecting metrics from: * aerospike @@ -169,9 +166,7 @@ Telegraf currently has support for collecting metrics from: * diskio * swap -## Supported Input Service Plugins - -Telegraf can collect metrics via the following services: +Telegraf can also collect metrics via the following service plugins: * statsd * kafka_consumer @@ -182,15 +177,16 @@ want to add support for another service or third-party API. 
## Supported Output Plugins * influxdb -* nsq -* kafka -* datadog -* opentsdb -* amqp (rabbitmq) -* mqtt -* librato -* prometheus * amon +* amqp +* datadog +* kafka +* amazon kinesis +* librato +* mqtt +* nsq +* opentsdb +* prometheus * riemann ## Contributing From 56509a61b9e4530c4b3bc2ee0efa531e53506a7c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 8 Jan 2016 17:22:23 -0700 Subject: [PATCH 053/103] Change 0.3.0 -> 0.10.0 --- CHANGELOG.md | 4 ++-- README.md | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 078e600f5..b944c6ac3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v0.3.0 [unreleased] +## v0.10.0 [unreleased] ### Release Notes - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` @@ -23,7 +23,7 @@ aggregated). ### Packaging change note: -RHEL/CentOS users upgrading from 0.2 to 0.3 will probably have their +RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup. diff --git a/README.md b/README.md index e86750f18..c405806f7 100644 --- a/README.md +++ b/README.md @@ -17,17 +17,17 @@ new plugins. ## Installation: -NOTE: Telegraf 0.3.x is **not** backwards-compatible with previous versions of +NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions of telegraf, both in the database layout and the configuration file. 0.2.x will continue to be supported, see below for download links. -TODO: link to blog post about 0.3.x changes. +TODO: link to blog post about 0.10.x changes. 
### Linux deb and rpm packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.3.0_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.3.0-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.10.0_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.10.0-1.x86_64.rpm 0.2.x: * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb @@ -45,9 +45,9 @@ controlled via `systemctl [action] telegraf` ### Linux binaries: Latest: -* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.3.0.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_386_0.3.0.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.3.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.10.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_386_0.10.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.10.0.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz From 2ec1ffdc11c164779a806028bc3d65077c3daafa Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 11 Jan 2016 13:33:19 -0700 Subject: [PATCH 054/103] Fix Telegraf s3 upload and readme links fixes #505 --- README.md | 8 ++++---- build.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c405806f7..00a81025f 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ TODO: link to blog post about 0.10.x changes. 
### Linux deb and rpm packages: Latest: -* http://get.influxdb.org/telegraf/telegraf_0.10.0_amd64.deb +* http://get.influxdb.org/telegraf/telegraf_0.10.0-1_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.10.0-1.x86_64.rpm 0.2.x: @@ -45,9 +45,9 @@ controlled via `systemctl [action] telegraf` ### Linux binaries: Latest: -* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.10.0.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_386_0.10.0.tar.gz -* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.10.0.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_amd64.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_386.tar.gz +* http://get.influxdb.org/telegraf/telegraf-0.10.0_linux_arm.tar.gz 0.2.x: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz diff --git a/build.py b/build.py index d6d6bee01..cd8b74d09 100755 --- a/build.py +++ b/build.py @@ -204,9 +204,9 @@ def upload_packages(packages, nightly=False): print "" c = boto.connect_s3() # TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly - bucket = c.get_bucket('telegraf-nightly') + bucket = c.get_bucket('get.influxdb.org') for p in packages: - name = os.path.basename(p) + name = os.path.join('telegraf', os.path.basename(p)) if bucket.get_key(name) is None or nightly: print "\t - Uploading {}...".format(name), k = Key(bucket) @@ -481,7 +481,7 @@ def print_usage(): print "\t --commit= \n\t\t- Use specific commit for build (currently a NOOP)." print "\t --branch= \n\t\t- Build from a specific branch (currently a NOOP)." print "\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information)." - print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." + print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." 
print "\t --race \n\t\t- Whether the produced build should have race detection enabled." print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)." print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)." From fb837ca66d40e0275966f9bd445387c2af0c6c5b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 14 Jan 2016 09:20:01 -0800 Subject: [PATCH 055/103] Add 0.10.0 blog post link to README --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 00a81025f..a0e4079d6 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,12 @@ new plugins. ## Installation: -NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions of -telegraf, both in the database layout and the configuration file. 0.2.x will -continue to be supported, see below for download links. +NOTE: Telegraf 0.10.x is **not** backwards-compatible with previous versions +of telegraf, both in the database layout and the configuration file. 0.2.x +will continue to be supported, see below for download links. -TODO: link to blog post about 0.10.x changes. +For more details on the differences between Telegraf 0.2.x and 0.10.x, see +the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/). ### Linux deb and rpm packages: From 3c898474896b652785b96b23b513aa58e398e761 Mon Sep 17 00:00:00 2001 From: Philip Silva Date: Tue, 12 Jan 2016 12:12:49 +0100 Subject: [PATCH 056/103] internal: FlattenJSON, flatten arrays as well With HTTP JSON or Elasticsearch, one can also process values nested in arrays. 
--- internal/internal.go | 11 ++++++++++- plugins/inputs/elasticsearch/testdata_test.go | 14 ++++++++++---- plugins/inputs/exec/exec_test.go | 6 +++++- plugins/inputs/httpjson/httpjson_test.go | 15 +++++++++------ 4 files changed, 34 insertions(+), 12 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index fc55ba529..8b0b33a41 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "strconv" "strings" "time" ) @@ -49,9 +50,17 @@ func (f *JSONFlattener) FlattenJSON( return err } } + case []interface{}: + for i, v := range t { + k := strconv.Itoa(i) + err := f.FlattenJSON(fieldname+"_"+k+"_", v) + if err != nil { + return nil + } + } case float64: f.Fields[fieldname] = t - case bool, string, []interface{}, nil: + case bool, string, nil: // ignored types return nil default: diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index 03e512f81..bca1f9e45 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -562,6 +562,9 @@ var indicesExpected = map[string]interface{}{ } var osExpected = map[string]interface{}{ + "load_average_0": float64(0.01), + "load_average_1": float64(0.04), + "load_average_2": float64(0.05), "swap_used_in_bytes": float64(0), "swap_free_in_bytes": float64(487997440), "timestamp": float64(1436460392944), @@ -724,10 +727,13 @@ var threadPoolExpected = map[string]interface{}{ } var fsExpected = map[string]interface{}{ - "timestamp": float64(1436460392946), - "total_free_in_bytes": float64(16909316096), - "total_available_in_bytes": float64(15894814720), - "total_total_in_bytes": float64(19507089408), + "data_0_total_in_bytes": float64(19507089408), + "data_0_free_in_bytes": float64(16909316096), + "data_0_available_in_bytes": float64(15894814720), + "timestamp": float64(1436460392946), + "total_free_in_bytes": float64(16909316096), + "total_available_in_bytes": 
float64(15894814720), + "total_total_in_bytes": float64(19507089408), } var transportExpected = map[string]interface{}{ diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d3e54429d..64fd69fce 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -59,13 +59,17 @@ func TestExec(t *testing.T) { var acc testutil.Accumulator err := e.Gather(&acc) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 4, "non-numeric measurements should be ignored") + assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), "cpu_used": float64(8234), "cpu_free": float64(32), "percent": float64(0.81), + "users_0": float64(0), + "users_1": float64(1), + "users_2": float64(2), + "users_3": float64(3), } acc.AssertContainsFields(t, "exec", fields) } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 7e9ffd331..dbc818344 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -19,12 +19,12 @@ const validJSON = ` }, "ignored_null": null, "integer": 4, - "ignored_list": [3, 4], + "list": [3, 4], "ignored_parent": { - "another_ignored_list": [4], "another_ignored_null": null, "ignored_string": "hello, world!" 
- } + }, + "another_list": [4] }` const validJSONTags = ` @@ -35,8 +35,11 @@ const validJSONTags = ` }` var expectedFields = map[string]interface{}{ - "parent_child": float64(3), - "integer": float64(4), + "parent_child": float64(3), + "list_0": float64(3), + "list_1": float64(4), + "another_list_0": float64(4), + "integer": float64(4), } const invalidJSON = "I don't think this is JSON" @@ -123,7 +126,7 @@ func TestHttpJson200(t *testing.T) { var acc testutil.Accumulator err := service.Gather(&acc) require.NoError(t, err) - assert.Equal(t, 4, acc.NFields()) + assert.Equal(t, 10, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv} mname := "httpjson_" + service.Name From 3cc1fecb535b306b865d82711ec58a153821160b Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Mon, 11 Jan 2016 17:19:21 -0500 Subject: [PATCH 057/103] Ping input doesn't return response time metric when timeout closes #506 --- CHANGELOG.md | 10 +++++++++- plugins/inputs/ping/ping.go | 4 +++- plugins/inputs/ping/ping_test.go | 1 - 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b944c6ac3..1b398d00c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,12 @@ -## v0.10.0 [unreleased] +## v0.10.1 [unreleased] + +### Features +- [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! + +### Bugfixes +- [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
+ +## v0.10.0 [2014-01-12] ### Release Notes - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index e03fc71e8..ff7cebb99 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -86,7 +86,9 @@ func (p *Ping) Gather(acc inputs.Accumulator) error { "packets_transmitted": trans, "packets_received": rec, "percent_packet_loss": loss, - "average_response_ms": avg, + } + if avg > 0 { + fields["average_response_ms"] = avg } acc.AddFields("ping", fields, tags) }(url, acc) diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 7ae86534d..b98a08be8 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -193,7 +193,6 @@ func TestBadPingGather(t *testing.T) { "packets_transmitted": 2, "packets_received": 0, "percent_packet_loss": 100.0, - "average_response_ms": 0.0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } From 7531e218c179d00554c0ed332f1f100bc68290e8 Mon Sep 17 00:00:00 2001 From: Hannu Valtonen Date: Tue, 12 Jan 2016 23:10:36 +0200 Subject: [PATCH 058/103] build.py: Make build script work on both Python2.x and Python3.x While at it also add a missing dependency on lsof required by the netstat plugin. closes #512 --- CHANGELOG.md | 1 + build.py | 240 ++++++++++++++++++++++++++------------------------- 2 files changed, 123 insertions(+), 118 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b398d00c..56058b4d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ### Features - [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! +- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/build.py b/build.py index cd8b74d09..a5892f26a 100755 --- a/build.py +++ b/build.py @@ -92,31 +92,32 @@ def run(command, allow_failure=False, shell=False): out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) else: out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + out = out.decode("utf8") except subprocess.CalledProcessError as e: - print "" - print "" - print "Executed command failed!" - print "-- Command run was: {}".format(command) - print "-- Failure was: {}".format(e.output) + print("") + print("") + print("Executed command failed!") + print("-- Command run was: {}".format(command)) + print("-- Failure was: {}".format(e.output)) if allow_failure: - print "Continuing..." + print("Continuing...") return None else: - print "" - print "Stopping." + print("") + print("Stopping.") sys.exit(1) except OSError as e: - print "" - print "" - print "Invalid command!" - print "-- Command run was: {}".format(command) - print "-- Failure was: {}".format(e) + print("") + print("") + print("Invalid command!") + print("-- Command run was: {}".format(command)) + print("-- Failure was: {}".format(e)) if allow_failure: - print "Continuing..." + print("Continuing...") return out else: - print "" - print "Stopping." + print("") + print("Stopping.") sys.exit(1) else: return out @@ -173,42 +174,42 @@ def check_path_for(b): return full_path def check_environ(build_dir = None): - print "\nChecking environment:" + print("\nChecking environment:") for v in [ "GOPATH", "GOBIN", "GOROOT" ]: - print "\t- {} -> {}".format(v, os.environ.get(v)) + print("\t- {} -> {}".format(v, os.environ.get(v))) cwd = os.getcwd() if build_dir == None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - print "\n!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures." + print("\n!! WARNING: Your current directory is not under your GOPATH. 
This may lead to build failures.") def check_prereqs(): - print "\nChecking for dependencies:" + print("\nChecking for dependencies:") for req in prereqs: - print "\t- {} ->".format(req), + print("\t- {} ->".format(req),) path = check_path_for(req) if path: - print "{}".format(path) + print("{}".format(path)) else: - print "?" + print("?") for req in optional_prereqs: - print "\t- {} (optional) ->".format(req), + print("\t- {} (optional) ->".format(req)) path = check_path_for(req) if path: - print "{}".format(path) + print("{}".format(path)) else: - print "?" - print "" + print("?") + print("") def upload_packages(packages, nightly=False): - print "Uploading packages to S3..." - print "" + print("Uploading packages to S3...") + print("") c = boto.connect_s3() # TODO(rossmcdonald) - Set to different S3 bucket for release vs nightly bucket = c.get_bucket('get.influxdb.org') for p in packages: name = os.path.join('telegraf', os.path.basename(p)) if bucket.get_key(name) is None or nightly: - print "\t - Uploading {}...".format(name), + print("\t - Uploading {}...".format(name)) k = Key(bucket) k.key = name if nightly: @@ -216,41 +217,41 @@ def upload_packages(packages, nightly=False): else: n = k.set_contents_from_filename(p, replace=False) k.make_public() - print "[ DONE ]" + print("[ DONE ]") else: - print "\t - Not uploading {}, already exists.".format(p) - print "" + print("\t - Not uploading {}, already exists.".format(p)) + print("") def run_tests(race, parallel, timeout, no_vet): get_command = "go get -d -t ./..." - print "Retrieving Go dependencies...", + print("Retrieving Go dependencies...") sys.stdout.flush() run(get_command) - print "done." 
- print "Running tests:" - print "\tRace: ", race + print("done.") + print("Running tests:") + print("\tRace: ", race) if parallel is not None: - print "\tParallel:", parallel + print("\tParallel:", parallel) if timeout is not None: - print "\tTimeout:", timeout + print("\tTimeout:", timeout) sys.stdout.flush() p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if len(out) > 0 or len(err) > 0: - print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors." - print out - print err + print("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + print(out) + print(err) return False if not no_vet: p = subprocess.Popen(["go", "tool", "vet", "-composites=false", "./"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() if len(out) > 0 or len(err) > 0: - print "Go vet failed. Please run 'go vet ./...' and fix any errors." - print out - print err + print("Go vet failed. Please run 'go vet ./...' and fix any errors.") + print(out) + print(err) return False else: - print "Skipping go vet ..." + print("Skipping go vet ...") sys.stdout.flush() test_command = "go test -v" if race: @@ -262,10 +263,10 @@ def run_tests(race, parallel, timeout, no_vet): test_command += " ./..." 
code = os.system(test_command) if code != 0: - print "Tests Failed" + print("Tests Failed") return False else: - print "Tests Passed" + print("Tests Passed") return True def build(version=None, @@ -279,26 +280,26 @@ def build(version=None, clean=False, outdir=".", goarm_version="6"): - print "-------------------------" - print "" - print "Build plan:" - print "\t- version: {}".format(version) + print("-------------------------") + print("") + print("Build plan:") + print("\t- version: {}".format(version)) if rc: - print "\t- release candidate: {}".format(rc) - print "\t- commit: {}".format(commit) - print "\t- branch: {}".format(branch) - print "\t- platform: {}".format(platform) - print "\t- arch: {}".format(arch) + print("\t- release candidate: {}".format(rc)) + print("\t- commit: {}".format(commit)) + print("\t- branch: {}".format(branch)) + print("\t- platform: {}".format(platform)) + print("\t- arch: {}".format(arch)) if arch == 'arm' and goarm_version: - print "\t- ARM version: {}".format(goarm_version) - print "\t- nightly? {}".format(str(nightly).lower()) - print "\t- race enabled? {}".format(str(race).lower()) - print "" + print("\t- ARM version: {}".format(goarm_version)) + print("\t- nightly? {}".format(str(nightly).lower())) + print("\t- race enabled? {}".format(str(race).lower())) + print("") if not os.path.exists(outdir): os.makedirs(outdir) elif clean and outdir != '/': - print "Cleaning build directory..." + print("Cleaning build directory...") shutil.rmtree(outdir) os.makedirs(outdir) @@ -306,14 +307,14 @@ def build(version=None, # If a release candidate, update the version information accordingly version = "{}rc{}".format(version, rc) - print "Starting build..." 
- for b, c in targets.iteritems(): - print "\t- Building '{}'...".format(os.path.join(outdir, b)), + print("Starting build...") + for b, c in targets.items(): + print("\t- Building '{}'...".format(os.path.join(outdir, b)),) build_command = "" build_command += "GOOS={} GOARCH={} ".format(platform, arch) if arch == "arm" and goarm_version: if goarm_version not in ["5", "6", "7", "arm64"]: - print "!! Invalid ARM build version: {}".format(goarm_version) + print("!! Invalid ARM build version: {}".format(goarm_version)) build_command += "GOARM={} ".format(goarm_version) build_command += "go build -o {} ".format(os.path.join(outdir, b)) if race: @@ -331,20 +332,20 @@ def build(version=None, build_command += "-X main.Commit={}\" ".format(get_current_commit()) build_command += c run(build_command, shell=True) - print "[ DONE ]" - print "" + print("[ DONE ]") + print("") def create_dir(path): try: os.makedirs(path) except OSError as e: - print e + print(e) def rename_file(fr, to): try: os.rename(fr, to) except OSError as e: - print e + print(e) # Return the original filename return fr else: @@ -355,27 +356,27 @@ def copy_file(fr, to): try: shutil.copy(fr, to) except OSError as e: - print e + print(e) def create_package_fs(build_root): - print "\t- Creating a filesystem hierarchy from directory: {}".format(build_root) + print("\t- Creating a filesystem hierarchy from directory: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] for d in dirs: create_dir(os.path.join(build_root, d)) - os.chmod(os.path.join(build_root, d), 0755) + os.chmod(os.path.join(build_root, d), 0o755) def package_scripts(build_root): - print "\t- Copying scripts and sample configuration to build directory" + print("\t- Copying scripts and sample configuration to build directory") shutil.copyfile(INIT_SCRIPT, 
os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) - os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) - os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0644) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0644) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) def go_get(update=False): get_command = None @@ -383,24 +384,27 @@ def go_get(update=False): get_command = "go get -u -f -d ./..." else: get_command = "go get -d ./..." - print "Retrieving Go dependencies...", + print("Retrieving Go dependencies...") run(get_command) - print "done.\n" + print("done.\n") def generate_md5_from_file(path): m = hashlib.md5() with open(path, 'rb') as f: - for chunk in iter(lambda: f.read(4096), b""): - m.update(chunk) + while True: + data = f.read(4096) + if not data: + break + m.update(data) return m.hexdigest() def build_packages(build_output, version, nightly=False, rc=None, iteration=1): outfiles = [] tmp_build_dir = create_temp_dir() try: - print "-------------------------" - print "" - print "Packaging..." 
+ print("-------------------------") + print("") + print("Packaging...") for p in build_output: # Create top-level folder displaying which platform (linux, etc) create_dir(os.path.join(tmp_build_dir, p)) @@ -419,11 +423,11 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): b = b + '.exe' fr = os.path.join(current_location, b) to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b) - print "\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to) + print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to)) copy_file(fr, to) # Package the directory structure for package_type in supported_packages[p]: - print "\t- Packaging directory '{}' as '{}'...".format(build_root, package_type), + print("\t- Packaging directory '{}' as '{}'...".format(build_root, package_type)) name = "telegraf" package_version = version package_iteration = iteration @@ -448,52 +452,53 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1): current_location) if package_type == "rpm": fpm_command += "--depends coreutils " + fpm_command += "--depends lsof" out = run(fpm_command, shell=True) matches = re.search(':path=>"(.*)"', out) outfile = None if matches is not None: outfile = matches.groups()[0] if outfile is None: - print "[ COULD NOT DETERMINE OUTPUT ]" + print("[ COULD NOT DETERMINE OUTPUT ]") else: # Strip nightly version (the unix epoch) from filename if nightly and package_type in ['deb', 'rpm']: outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) outfiles.append(os.path.join(os.getcwd(), outfile)) - print "[ DONE ]" + print("[ DONE ]") # Display MD5 hash for generated package - print "\t\tMD5 = {}".format(generate_md5_from_file(outfile)) - print "" + print("\t\tMD5 = {}".format(generate_md5_from_file(outfile))) + print("") return outfiles finally: # Cleanup shutil.rmtree(tmp_build_dir) def print_usage(): - print "Usage: ./build.py [options]" - print "" - print "Options:" - 
print "\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build." - print "\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386, arm, or all" - print "\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6" - print "\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all" - print "\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag." - print "\t --commit= \n\t\t- Use specific commit for build (currently a NOOP)." - print "\t --branch= \n\t\t- Build from a specific branch (currently a NOOP)." - print "\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information)." - print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." - print "\t --race \n\t\t- Whether the produced build should have race detection enabled." - print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)." - print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)." - print "\t --update \n\t\t- Whether dependencies should be updated prior to building." - print "\t --test \n\t\t- Run Go tests. Will not produce a build." - print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified." - print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s." - print "\t --clean \n\t\t- Clean the build output directory prior to creating build." - print "" + print("Usage: ./build.py [options]") + print("") + print("Options:") + print("\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build.") + print("\t --arch= \n\t\t- Build for specified architecture. 
Acceptable values: x86_64|amd64, 386, arm, or all") + print("\t --goarm= \n\t\t- Build for specified ARM version (when building for ARM). Default value is: 6") + print("\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all") + print("\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag.") + print("\t --commit= \n\t\t- Use specific commit for build (currently a NOOP).") + print("\t --branch= \n\t\t- Build from a specific branch (currently a NOOP).") + print("\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information).") + print("\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).") + print("\t --race \n\t\t- Whether the produced build should have race detection enabled.") + print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).") + print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).") + print("\t --update \n\t\t- Whether dependencies should be updated prior to building.") + print("\t --test \n\t\t- Run Go tests. Will not produce a build.") + print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.") + print("\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s.") + print("\t --clean \n\t\t- Clean the build output directory prior to creating build.") + print("") def print_package_summary(packages): - print packages + print(packages) def main(): # Command-line arguments @@ -577,13 +582,13 @@ def main(): print_usage() return 0 else: - print "!! Unknown argument: {}".format(arg) + print("!! Unknown argument: {}".format(arg)) print_usage() return 1 if nightly: if rc: - print "!! Cannot be both nightly and a release candidate! Stopping." + print("!! Cannot be both nightly and a release candidate! 
Stopping.") return 1 # In order to support nightly builds on the repository, we are adding the epoch timestamp # to the version so that version numbers are always greater than the previous nightly. @@ -622,7 +627,7 @@ def main(): platforms = [] single_build = True if target_platform == 'all': - platforms = supported_builds.keys() + platforms = list(supported_builds.keys()) single_build = False else: platforms = [target_platform] @@ -655,7 +660,7 @@ def main(): # Build packages if package: if not check_path_for("fpm"): - print "!! Cannot package without command 'fpm'. Stopping." + print("!! Cannot package without command 'fpm'. Stopping.") return 1 packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration) # TODO(rossmcdonald): Add nice output for print_package_summary() @@ -667,4 +672,3 @@ def main(): if __name__ == '__main__': sys.exit(main()) - From 923be102b3a68792a960ebf7710b329dc0d24113 Mon Sep 17 00:00:00 2001 From: Kostas Botsas Date: Thu, 14 Jan 2016 15:55:53 -0800 Subject: [PATCH 059/103] Align exec documentation with v0.10 updates --- plugins/inputs/exec/README.md | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 7aa52db33..29203f9a9 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -7,13 +7,13 @@ For example, if you have a json-returning command called mycollector, you could setup the exec plugin with: ``` -[[exec.commands]] -command = "/usr/bin/mycollector --output=json" -name = "mycollector" -interval = 10 +[[inputs.exec]] + command = "/usr/bin/mycollector --output=json" + name_suffix = "_mycollector" + interval = 10 ``` -The name is used as a prefix for the measurements. +The name suffix is appended to exec as "exec_name_suffix" to identify the input stream. The interval is used to determine how often a particular command should be run. 
Each time the exec plugin runs, it will only run a particular command if it has been at least @@ -22,7 +22,7 @@ time the exec plugin runs, it will only run a particular command if it has been # Sample -Let's say that we have a command named "mycollector", which gives the following output: +Let's say that we have a command with the name_suffix "_mycollector", which gives the following output: ```json { "a": 0.5, @@ -34,9 +34,7 @@ Let's say that we have a command named "mycollector", which gives the following } ``` -The collected metrics will be: +The collected metrics will be stored as field values under the same measurement "exec_mycollector": ``` -exec_mycollector_a value=0.5 -exec_mycollector_b_d value=0.1 -exec_mycollector_b_e value=5 + exec_mycollector a=0.5,b_c="some text",b_d=0.1,b_e=5 1452815002357578567 ``` From a39a7a7a031273db4199e5a4942e709ab1f75a0d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 14 Jan 2016 10:31:47 -0800 Subject: [PATCH 060/103] Add an interface:"all" tag to the net protocol counters fixes #508 --- CHANGELOG.md | 1 + plugins/inputs/system/net.go | 5 ++++- plugins/inputs/system/net_test.go | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56058b4d3..9e7fc92da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
+- [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin ## v0.10.0 [2014-01-12] diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index 95df7a741..42f0d5854 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -94,7 +94,10 @@ func (s *NetIOStats) Gather(acc inputs.Accumulator) error { fields[name] = value } } - acc.AddFields("net", fields, nil) + tags := map[string]string{ + "interface": "all", + } + acc.AddFields("net", fields, tags) return nil } diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/system/net_test.go index ba71848d1..3ec2cb990 100644 --- a/plugins/inputs/system/net_test.go +++ b/plugins/inputs/system/net_test.go @@ -80,7 +80,10 @@ func TestNetStats(t *testing.T) { "udp_noports": int64(892592), "udp_indatagrams": int64(4655), } - acc.AssertContainsTaggedFields(t, "net", fields2, make(map[string]string)) + ntags = map[string]string{ + "interface": "all", + } + acc.AssertContainsTaggedFields(t, "net", fields2, ntags) acc.Points = nil From d6ef3b1e02907c0561ce1c8872e1e99ad07d9ef9 Mon Sep 17 00:00:00 2001 From: Kevin Fitzpatrick Date: Thu, 14 Jan 2016 11:34:22 -0800 Subject: [PATCH 061/103] Note on where to look for plugin information --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a0e4079d6..3b6303a15 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,11 @@ configuration options. ## Supported Input Plugins -Telegraf currently has support for collecting metrics from: +Telegraf currently has support for collecting metrics from many sources. For +more information on each, please look at the directory of the same name in +`plugins/inputs`. 
+ +Currently implemented sources: * aerospike * apache From e8907acd281d5f68f38dbba5bace3b3047f1c012 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 14 Jan 2016 22:35:05 -0700 Subject: [PATCH 062/103] Update Godeps and fix changelog 2014->2016 --- CHANGELOG.md | 2 +- Godeps | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e7fc92da..d93b38133 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin -## v0.10.0 [2014-01-12] +## v0.10.0 [2016-01-12] ### Release Notes - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` diff --git a/Godeps b/Godeps index 2584179bd..1b427674a 100644 --- a/Godeps +++ b/Godeps @@ -3,18 +3,18 @@ github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81 github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757 -github.com/aws/aws-sdk-go f09322ae1e6468fe828c862542389bc45baf3c00 +github.com/aws/aws-sdk-go c4c1a1a2a076858fe18b2be674d833c796c45b09 github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d -github.com/boltdb/bolt 34a0fa5307f7562980fb8e7ff4723f7987edf49b +github.com/boltdb/bolt 6465994716bf6400605746e79224cf1e7ed68725 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 -github.com/dancannon/gorethink a124c9663325ed9f7fb669d17c69961b59151e6e +github.com/dancannon/gorethink ff457cac6a529d9749d841a733d76e8305cba3c8 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue 
ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 175e1df973274f04e9b459a62cffc49808f1a649 +github.com/fsouza/go-dockerclient 2fb7694010aa553998ed513dc8805ab00708077a github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 -github.com/go-sql-driver/mysql 7a8740a6bd8feb6af5786ab9a9f1513970019d8c -github.com/gogo/protobuf 7b1331554dbe882cb3613ee8f1824a5583627963 +github.com/go-sql-driver/mysql 6fd058ce0d6b7ee43174e80d5a3e7f483c4dfbe5 +github.com/gogo/protobuf c57e439bad574c2e0877ff18d514badcfced004d github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 @@ -22,31 +22,31 @@ github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64 github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee -github.com/influxdb/influxdb bd63489ef0faae2465ae5b1f0a28bd7e71e02e38 +github.com/influxdb/influxdb db84a6ed76353905432ff8bd91527c68b3ea1be6 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 -github.com/klauspost/crc32 a3b15ae34567abb20a22992b989cd76f48d09c47 -github.com/lib/pq 11fc39a580a008f1f39bb3d11d984fb34ed778d9 +github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 +github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9 github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988 -github.com/pmezard/go-difflib 
e8554b8641db39598be7f6342874b958f12ae1d4 +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common 0a3005bb37bc411040083a55372e77c405f6464c github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f -github.com/shirou/gopsutil ef151b7ff7fe76308f89a389447b7b78dfa02e0f +github.com/shirou/gopsutil 8850f58d7035653e1ab90711481954c8ca1b9813 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 -github.com/stretchr/testify c92828f29518bc633893affbce12904ba41a7cfa +github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 -golang.org/x/crypto f23ba3a5ee43012fcb4b92e1a2a405a92554f4f2 -golang.org/x/net 520af5de654dc4dd4f0f65aa40e66dbbd9043df1 -gopkg.in/dancannon/gorethink.v1 a124c9663325ed9f7fb669d17c69961b59151e6e +golang.org/x/crypto 3760e016850398b85094c4c99e955b8c3dea5711 +golang.org/x/net 99ca920b6037ef77af8a11297150f7f0d8f4ef80 +gopkg.in/dancannon/gorethink.v1 e2cef022d0495329dfb0635991de76efcab5cf50 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49 gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 From 7bfb42946e15334e905bb23d66b9593f53adfea4 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Fri, 15 Jan 2016 09:51:04 -0600 Subject: [PATCH 063/103] Switched to /etc/debian_version for Debian/Ubuntu distribution recognition in post-install. 
closes #526 closes #525 --- scripts/post-install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 19b895808..bb4803f8d 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -56,7 +56,7 @@ if [[ -f /etc/redhat-release ]]; then install_init install_chkconfig fi -elif [[ -f /etc/lsb-release ]]; then +elif [[ -f /etc/debian_version ]]; then # Debian/Ubuntu logic which systemctl &>/dev/null if [[ $? -eq 0 ]]; then From b44644b6bf802fc7b497165320621e1dfdb067a0 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Thu, 7 Jan 2016 20:49:14 -0500 Subject: [PATCH 064/103] Add response time to httpjson plugin closes #475 --- CHANGELOG.md | 1 + plugins/inputs/httpjson/httpjson.go | 25 +++++++++++++++--------- plugins/inputs/httpjson/httpjson_test.go | 18 +++++++++++++---- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d93b38133..be5194726 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! +- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index e31085e3a..5763fd6fa 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -9,6 +9,7 @@ import ( "net/url" "strings" "sync" + "time" "github.com/influxdb/telegraf/internal" "github.com/influxdb/telegraf/plugins/inputs" @@ -119,7 +120,8 @@ func (h *HttpJson) gatherServer( acc inputs.Accumulator, serverURL string, ) error { - resp, err := h.sendRequest(serverURL) + resp, responseTime, err := h.sendRequest(serverURL) + if err != nil { return err } @@ -141,6 +143,9 @@ func (h *HttpJson) gatherServer( delete(jsonOut, tag) } + if responseTime >= 0 { + jsonOut["response_time"] = responseTime + } f := internal.JSONFlattener{} err = f.FlattenJSON("", jsonOut) if err != nil { @@ -164,11 +169,11 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(serverURL string) (string, error) { +func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } params := url.Values{} @@ -180,19 +185,21 @@ func (h *HttpJson) sendRequest(serverURL string) (string, error) { // Create + send request req, err := http.NewRequest(h.Method, requestURL.String(), nil) if err != nil { - return "", err + return "", -1, err } + start := time.Now() resp, err := h.client.MakeRequest(req) if err != nil { - return "", err + return "", -1, err } - defer resp.Body.Close() defer resp.Body.Close() + responseTime := time.Since(start).Seconds() + body, err := ioutil.ReadAll(resp.Body) if err != nil { - return string(body), err + return string(body), responseTime, err } // Process response @@ -203,10 +210,10 @@ func (h *HttpJson) sendRequest(serverURL string) (string, 
error) { http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) - return string(body), err + return string(body), responseTime, err } - return string(body), err + return string(body), responseTime, err } func init() { diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index dbc818344..3f14290ff 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -14,7 +14,7 @@ import ( const validJSON = ` { "parent": { - "child": 3, + "child": 3.0, "ignored_child": "hi" }, "ignored_null": null, @@ -126,10 +126,16 @@ func TestHttpJson200(t *testing.T) { var acc testutil.Accumulator err := service.Gather(&acc) require.NoError(t, err) - assert.Equal(t, 10, acc.NFields()) + assert.Equal(t, 12, acc.NFields()) + // Set responsetime + for _, p := range acc.Points { + p.Fields["response_time"] = 1.0 + } + for _, srv := range service.Servers { tags := map[string]string{"server": srv} mname := "httpjson_" + service.Name + expectedFields["response_time"] = 1.0 acc.AssertContainsTaggedFields(t, mname, expectedFields, tags) } } @@ -188,11 +194,15 @@ func TestHttpJson200Tags(t *testing.T) { if service.Name == "other_webapp" { var acc testutil.Accumulator err := service.Gather(&acc) + // Set responsetime + for _, p := range acc.Points { + p.Fields["response_time"] = 1.0 + } require.NoError(t, err) - assert.Equal(t, 2, acc.NFields()) + assert.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv, "role": "master", "build": "123"} - fields := map[string]interface{}{"value": float64(15)} + fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} mname := "httpjson_" + service.Name acc.AssertContainsTaggedFields(t, mname, fields, tags) } From c0d98ecd4bca2894d197eb8913512085a6052182 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Thu, 14 Jan 2016 23:03:39 +0000 Subject: [PATCH 065/103] Added initial 
support for gosensors module --- plugins/inputs/all/all.go | 1 + plugins/inputs/sensors/sensors.go | 88 +++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 plugins/inputs/sensors/sensors.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index fea1c7ca0..b6b1e74da 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -28,6 +28,7 @@ import ( _ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq" _ "github.com/influxdb/telegraf/plugins/inputs/redis" _ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb" + _ "github.com/influxdb/telegraf/plugins/inputs/sensors" _ "github.com/influxdb/telegraf/plugins/inputs/statsd" _ "github.com/influxdb/telegraf/plugins/inputs/system" _ "github.com/influxdb/telegraf/plugins/inputs/trig" diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go new file mode 100644 index 000000000..6267ac64b --- /dev/null +++ b/plugins/inputs/sensors/sensors.go @@ -0,0 +1,88 @@ +package sensors + +import ( + "strings" + + "github.com/md14454/gosensors" + + "github.com/influxdb/telegraf/plugins/inputs" +) + +type Sensors struct { + Sensors []string +} + +func (_ *Sensors) Description() string { + return "Monitor sensors using lm-sensors package" +} + +var sensorsSampleConfig = ` + # By default, telegraf gathers stats from all sensors + # detected by the lm-sensors module. + # + # Only collect stats from the selected sensors. Sensors + # are listed as :. This + # information can be found by running the sensors command. + # e.g. sensors -u + # A * as the feature name will return all features of the chip + # + # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*", ... 
] +` + +func (_ *Sensors) SampleConfig() string { + return sensorsSampleConfig +} + +func (s *Sensors) Gather(acc inputs.Accumulator) error { + gosensors.Init() + defer gosensors.Cleanup() + + for _, chip := range gosensors.GetDetectedChips() { + for _, feature := range chip.GetFeatures() { + chipName := chip.String() + featureLabel := feature.GetLabel() + + if len(s.Sensors) != 0 { + var found bool + + for _, sensor := range s.Sensors { + parts := strings.SplitN(":", sensor, 2) + + if parts[0] == chipName { + if parts[1] == "*" || parts[1] == featureLabel { + found = true + break + } + } + } + + if !found { + continue + } + } + + tags := map[string]string{ + "chip": chipName, + "adapter": chip.AdapterName(), + "feature-name": feature.Name, + "feature-label": featureLabel, + } + + fieldName := chipName + ":" + featureLabel + + fields := map[string]interface{}{ + featureLabel: feature.GetValue(), + } + + acc.AddFields("sensors", fields, tags) + } + } + + return nil +} + +func init() { + inputs.Add("sensors", func() inputs.Input { + return &Sensors{} + }) +} From 378b7467a4fd81363fa35eae8bcb8129ed1b8de0 Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Thu, 14 Jan 2016 23:12:35 +0000 Subject: [PATCH 066/103] Fixed an unused variable --- plugins/inputs/sensors/sensors.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 6267ac64b..a4e29eb76 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -71,7 +71,7 @@ func (s *Sensors) Gather(acc inputs.Accumulator) error { fieldName := chipName + ":" + featureLabel fields := map[string]interface{}{ - featureLabel: feature.GetValue(), + fieldName: feature.GetValue(), } acc.AddFields("sensors", fields, tags) From 9b96c62e46dc817833c5985ea10dc2a104d6e41b Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Fri, 15 Jan 2016 12:21:49 +0000 Subject: [PATCH 067/103] Change build configuration to linux only --- 
plugins/inputs/sensors/sensors.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index a4e29eb76..88e362154 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,3 +1,5 @@ +// +build linux + package sensors import ( From 2eda8d64c74bd177565d2cec3a29331b23f6e48b Mon Sep 17 00:00:00 2001 From: Matt Davis Date: Fri, 15 Jan 2016 12:22:04 +0000 Subject: [PATCH 068/103] Added infor to readme and changelog --- CHANGELOG.md | 1 + README.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be5194726..77626e3a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ configurations overwritten by the upgrade. There is a backup stored at - Added ability to specify per-plugin measurement suffix and prefix. (`name_prefix` and `name_suffix`) - Added ability to override base plugin measurement name. (`name_override`) +- Added a sensors input based on lm-sensors ### Bugfixes diff --git a/README.md b/README.md index 3b6303a15..31a7590fb 100644 --- a/README.md +++ b/README.md @@ -162,6 +162,7 @@ Currently implemented sources: * twemproxy * zfs * zookeeper +* sensors * system * cpu * mem From 963a9429dd299070775599e696c3bb4982cf97a7 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 11:22:33 -0700 Subject: [PATCH 069/103] Tweak changelog for sensors plugin, and add a non-linux build file closes #519 closes #168 --- CHANGELOG.md | 2 +- plugins/inputs/sensors/sensors.go | 16 ++++++++-------- plugins/inputs/sensors/sensors_notlinux.go | 3 +++ 3 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 plugins/inputs/sensors/sensors_notlinux.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 77626e3a8..fa7f9607a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! 
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! +- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! @@ -44,7 +45,6 @@ configurations overwritten by the upgrade. There is a backup stored at - Added ability to specify per-plugin measurement suffix and prefix. (`name_prefix` and `name_suffix`) - Added ability to override base plugin measurement name. (`name_override`) -- Added a sensors input based on lm-sensors ### Bugfixes diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 88e362154..926003b83 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -19,16 +19,16 @@ func (_ *Sensors) Description() string { } var sensorsSampleConfig = ` - # By default, telegraf gathers stats from all sensors - # detected by the lm-sensors module. - # - # Only collect stats from the selected sensors. Sensors - # are listed as :. This - # information can be found by running the sensors command. - # e.g. sensors -u + # By default, telegraf gathers stats from all sensors detected by the + # lm-sensors module. + # + # Only collect stats from the selected sensors. Sensors are listed as + # :. This information can be found by running the + # sensors command, e.g. sensors -u + # # A * as the feature name will return all features of the chip # - # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*", ... 
] + # sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"] ` func (_ *Sensors) SampleConfig() string { diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go new file mode 100644 index 000000000..62a621159 --- /dev/null +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package sensors From 50334e6bacd79ec65b1679962277e0ee0c271a69 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 13:15:33 -0700 Subject: [PATCH 070/103] Only compile the sensors plugin if the 'sensors' tag is set --- plugins/inputs/sensors/sensors.go | 2 +- plugins/inputs/sensors/sensors_nocompile.go | 3 +++ plugins/inputs/sensors/sensors_notlinux.go | 3 --- 3 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/sensors/sensors_nocompile.go delete mode 100644 plugins/inputs/sensors/sensors_notlinux.go diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 926003b83..18e26278d 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,4 +1,4 @@ -// +build linux +// +build linux,sensors package sensors diff --git a/plugins/inputs/sensors/sensors_nocompile.go b/plugins/inputs/sensors/sensors_nocompile.go new file mode 100644 index 000000000..5c38a437b --- /dev/null +++ b/plugins/inputs/sensors/sensors_nocompile.go @@ -0,0 +1,3 @@ +// +build !linux !sensors + +package sensors diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go deleted file mode 100644 index 62a621159..000000000 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package sensors From f60c090e4cf58b39923ae6bd1ee8e4b9d8f067d2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 12:25:56 -0700 Subject: [PATCH 071/103] Add a quiet mode to telegraf closes #514 --- README.md | 4 +++- agent.go | 27 +++++++++++++++++---------- 
cmd/telegraf/telegraf.go | 8 ++++++++ internal/config/config.go | 9 +++++++-- 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 31a7590fb..6b723787e 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ if you don't have it already. You also must build with golang version 1.4+. ```console $ telegraf -help -Telegraf, The plugin-driven server agent for reporting metrics into InfluxDB +Telegraf, The plugin-driven server agent for collecting and reporting metrics. Usage: @@ -100,6 +100,8 @@ The flags are: -input-filter filter the input plugins to enable, separator is : -output-filter filter the output plugins to enable, separator is : -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -debug print metrics as they're generated to stdout + -quiet run in quiet mode -version print the version to stdout Examples: diff --git a/agent.go b/agent.go index 1af2a1f7c..0c5d58db5 100644 --- a/agent.go +++ b/agent.go @@ -121,8 +121,10 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { wg.Wait() elapsed := time.Since(start) - log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n", - a.Config.Agent.Interval.Duration, counter, elapsed) + if !a.Config.Agent.Quiet { + log.Printf("Gathered metrics, (%s interval), from %d inputs in %s\n", + a.Config.Agent.Interval.Duration, counter, elapsed) + } return nil } @@ -149,8 +151,10 @@ func (a *Agent) gatherSeparate( } elapsed := time.Since(start) - log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n", - input.Config.Interval, input.Name, elapsed) + if !a.Config.Agent.Quiet { + log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n", + input.Config.Interval, input.Name, elapsed) + } if outerr != nil { return outerr @@ -235,8 +239,10 @@ func (a *Agent) writeOutput( if err == nil { // Write successful elapsed := time.Since(start) - log.Printf("Flushed %d metrics to output %s in %s\n", - len(filtered), ro.Name, elapsed) + if 
!a.Config.Agent.Quiet { + log.Printf("Flushed %d metrics to output %s in %s\n", + len(filtered), ro.Name, elapsed) + } return } @@ -327,12 +333,13 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration { func (a *Agent) Run(shutdown chan struct{}) error { var wg sync.WaitGroup - a.Config.Agent.FlushInterval.Duration = jitterInterval(a.Config.Agent.FlushInterval.Duration, + a.Config.Agent.FlushInterval.Duration = jitterInterval( + a.Config.Agent.FlushInterval.Duration, a.Config.Agent.FlushJitter.Duration) - log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+ - "Flush Interval:%s\n", - a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, + log.Printf("Agent Config: Interval:%s, Debug:%#v, Quiet:%#v, Hostname:%#v, "+ + "Flush Interval:%s \n", + a.Config.Agent.Interval.Duration, a.Config.Agent.Debug, a.Config.Agent.Quiet, a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) // channel shared between all input threads for accumulating points diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 21e89ce04..a2b5161be 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -16,6 +16,8 @@ import ( var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout") +var fQuiet = flag.Bool("quiet", false, + "run in quiet mode") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fConfig = flag.String("config", "", "configuration file to load") var fConfigDirectory = flag.String("config-directory", "", @@ -57,6 +59,8 @@ The flags are: -input-filter filter the input plugins to enable, separator is : -output-filter filter the output plugins to enable, separator is : -usage print usage for a plugin, ie, 'telegraf -usage mysql' + -debug print metrics as they're generated to stdout + -quiet run in quiet mode -version print the version to stdout Examples: @@ -173,6 +177,10 @@ func main() { ag.Config.Agent.Debug = true } + if *fQuiet { + 
ag.Config.Agent.Quiet = true + } + if *fTest { err = ag.Test() if err != nil { diff --git a/internal/config/config.go b/internal/config/config.go index 6c3d17750..ca4972b69 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -76,8 +76,11 @@ type AgentConfig struct { UTC bool `toml:"utc"` Precision string - // Option for running in debug mode - Debug bool + // Debug is the option for running in debug mode + Debug bool + + // Quiet is the option for running in quiet mode + Quiet bool Hostname string } @@ -279,6 +282,8 @@ var header = `# Telegraf configuration # Run telegraf in debug mode debug = false + # Run telegraf in quiet mode + quiet = false # Override default hostname, if empty use os.Hostname() hostname = "" From b24e71b23210047f0b2c6c0af76aad412127da88 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 14:03:04 -0700 Subject: [PATCH 072/103] Removing old package script, trim Makefile --- Makefile | 12 -- scripts/package.sh | 365 --------------------------------------------- 2 files changed, 377 deletions(-) delete mode 100755 scripts/package.sh diff --git a/Makefile b/Makefile index b9de93ffb..2ae4d1257 100644 --- a/Makefile +++ b/Makefile @@ -21,18 +21,6 @@ dev: prepare "-X main.Version=$(VERSION)" \ ./cmd/telegraf/telegraf.go -# Build linux 64-bit, 32-bit and arm architectures -build-linux-bins: prepare - GOARCH=amd64 GOOS=linux go build -o telegraf_linux_amd64 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=386 GOOS=linux go build -o telegraf_linux_386 \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - GOARCH=arm GOOS=linux go build -o telegraf_linux_arm \ - -ldflags "-X main.Version=$(VERSION)" \ - ./cmd/telegraf/telegraf.go - # Get dependencies and use gdm to checkout changesets prepare: go get ./... 
diff --git a/scripts/package.sh b/scripts/package.sh deleted file mode 100755 index fbbf39eb8..000000000 --- a/scripts/package.sh +++ /dev/null @@ -1,365 +0,0 @@ -#!/usr/bin/env bash - -########################################################################### -# Packaging script which creates debian and RPM packages. It optionally -# tags the repo with the given version. -# -# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS -# CLI tools must also be installed. -# -# https://github.com/jordansissel/fpm -# http://aws.amazon.com/cli/ -# -# Packaging process: to package a build, simply execute: -# -# package.sh -# -# The script will automatically determined the version number from git using -# `git describe --always --tags` -# -# AWS upload: the script will also offer to upload the packages to S3. If -# this option is selected, the credentials should be present in the file -# ~/aws.conf. The contents should be of the form: -# -# [default] -# aws_access_key_id= -# aws_secret_access_key= -# region = us-east-1 -# -# Trim the leading spaces when creating the file. The script will exit if -# S3 upload is requested, but this file does not exist. - -AWS_FILE=~/aws.conf - -INSTALL_ROOT_DIR=/opt/telegraf -TELEGRAF_LOG_DIR=/var/log/telegraf -CONFIG_ROOT_DIR=/etc/opt/telegraf -CONFIG_D_DIR=/etc/opt/telegraf/telegraf.d -LOGROTATE_DIR=/etc/logrotate.d - -SAMPLE_CONFIGURATION=etc/telegraf.conf -LOGROTATE_CONFIGURATION=etc/logrotate.d/telegraf -INITD_SCRIPT=scripts/init.sh -SYSTEMD_SCRIPT=scripts/telegraf.service - -TMP_WORK_DIR=`mktemp -d` -POST_INSTALL_PATH=`mktemp` -ARCH=`uname -i` -LICENSE=MIT -URL=influxdb.com -MAINTAINER=support@influxdb.com -VENDOR=InfluxDB -DESCRIPTION="InfluxDB Telegraf agent" -PKG_DEPS=(coreutils) -GO_VERSION="go1.5" -GOPATH_INSTALL= -BINS=( - telegraf - ) - -########################################################################### -# Helper functions. - -# usage prints simple usage information. 
-usage() { - echo -e "$0\n" - cleanup_exit $1 -} - -# make_dir_tree creates the directory structure within the packages. -make_dir_tree() { - work_dir=$1 - version=$2 - mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts - if [ $? -ne 0 ]; then - echo "Failed to create installation directory -- aborting." - cleanup_exit 1 - fi - mkdir -p $work_dir/$CONFIG_ROOT_DIR - if [ $? -ne 0 ]; then - echo "Failed to create configuration directory -- aborting." - cleanup_exit 1 - fi - mkdir -p $work_dir/$CONFIG_D_DIR - if [ $? -ne 0 ]; then - echo "Failed to create configuration subdirectory -- aborting." - cleanup_exit 1 - fi - mkdir -p $work_dir/$LOGROTATE_DIR - if [ $? -ne 0 ]; then - echo "Failed to create logrotate directory -- aborting." - cleanup_exit 1 - fi - -} - -# cleanup_exit removes all resources created during the process and exits with -# the supplied returned code. -cleanup_exit() { - rm -r $TMP_WORK_DIR - rm $POST_INSTALL_PATH - exit $1 -} - -# check_gopath sanity checks the value of the GOPATH env variable, and determines -# the path where build artifacts are installed. GOPATH may be a colon-delimited -# list of directories. -check_gopath() { - [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1 - GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1` - [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1 - echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation." -} - -# check_clean_tree ensures that no source file is locally modified. -check_clean_tree() { - modified=$(git ls-files --modified | wc -l) - if [ $modified -ne 0 ]; then - echo "The source tree is not clean -- aborting." - cleanup_exit 1 - fi - echo "Git tree is clean." -} - -# do_build builds the code. The version and commit must be passed in. -do_build() { - version=$1 - commit=`git rev-parse HEAD` - if [ $? 
-ne 0 ]; then - echo "Unable to retrieve current commit -- aborting" - cleanup_exit 1 - fi - - for b in ${BINS[*]}; do - rm -f $GOPATH_INSTALL/bin/$b - done - - gdm restore - go install -ldflags="-X main.Version $version" ./... - if [ $? -ne 0 ]; then - echo "Build failed, unable to create package -- aborting" - cleanup_exit 1 - fi - echo "Build completed successfully." -} - -# generate_postinstall_script creates the post-install script for the -# package. It must be passed the version. -generate_postinstall_script() { - version=$1 - cat <$POST_INSTALL_PATH -#!/bin/sh -rm -f $INSTALL_ROOT_DIR/telegraf -rm -f $INSTALL_ROOT_DIR/init.sh -ln -sfn $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf - -if ! id telegraf >/dev/null 2>&1; then - useradd --help 2>&1| grep -- --system > /dev/null 2>&1 - old_useradd=\$? - if [ \$old_useradd -eq 0 ] - then - useradd --system -U -M telegraf - else - groupadd telegraf && useradd -M -g telegraf telegraf - fi -fi - -# Systemd -if which systemctl > /dev/null 2>&1 ; then - cp $INSTALL_ROOT_DIR/versions/$version/scripts/telegraf.service \ - /lib/systemd/system/telegraf.service - systemctl enable telegraf - - # restart on upgrade of package - if [ "$#" -eq 2 ]; then - systemctl restart telegraf - fi - -# Sysv -else - ln -sfn $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh \ - $INSTALL_ROOT_DIR/init.sh - rm -f /etc/init.d/telegraf - ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf - chmod +x /etc/init.d/telegraf - # update-rc.d sysv service: - if which update-rc.d > /dev/null 2>&1 ; then - update-rc.d -f telegraf remove - update-rc.d telegraf defaults - # CentOS-style sysv: - else - chkconfig --add telegraf - fi - - # restart on upgrade of package - if [ "$#" -eq 2 ]; then - /etc/init.d/telegraf restart - fi - - mkdir -p $TELEGRAF_LOG_DIR - chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR -fi - -chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR -chmod -R a+rX $INSTALL_ROOT_DIR - -EOF - echo "Post-install 
script created successfully at $POST_INSTALL_PATH" -} - -########################################################################### -# Start the packaging process. - -if [ "$1" == "-h" ]; then - usage 0 -elif [ "$1" == "" ]; then - VERSION=`git describe --always --tags | tr -d v` -else - VERSION="$1" -fi - -cd `git rev-parse --show-toplevel` -echo -e "\nStarting package process, version: $VERSION\n" - -check_gopath -do_build $VERSION -make_dir_tree $TMP_WORK_DIR $VERSION - -########################################################################### -# Copy the assets to the installation directories. - -for b in ${BINS[*]}; do - cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION - if [ $? -ne 0 ]; then - echo "Failed to copy binaries to packaging directory -- aborting." - cleanup_exit 1 - fi -done - -echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION" - -cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts -if [ $? -ne 0 ]; then - echo "Failed to copy init.d script to packaging directory -- aborting." - cleanup_exit 1 -fi -echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" - -cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts -if [ $? -ne 0 ]; then - echo "Failed to copy systemd file to packaging directory -- aborting." - cleanup_exit 1 -fi -echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" - -cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf -if [ $? -ne 0 ]; then - echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." - cleanup_exit 1 -fi - -cp $LOGROTATE_CONFIGURATION $TMP_WORK_DIR/$LOGROTATE_DIR/telegraf -if [ $? -ne 0 ]; then - echo "Failed to copy $LOGROTATE_CONFIGURATION to packaging directory -- aborting." 
- cleanup_exit 1 -fi - -generate_postinstall_script $VERSION - -########################################################################### -# Create the actual packages. - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xn" ]; then - echo "Packaging aborted." - cleanup_exit 1 - fi -fi - -if [ $ARCH == "i386" ]; then - rpm_package=telegraf-$VERSION-1.i686.rpm - debian_package=telegraf_${VERSION}_i686.deb - deb_args="-a i686" - rpm_args="setarch i686" -elif [ $ARCH == "arm" ]; then - rpm_package=telegraf-$VERSION-1.armel.rpm - debian_package=telegraf_${VERSION}_armel.deb -else - rpm_package=telegraf-$VERSION-1.x86_64.rpm - debian_package=telegraf_${VERSION}_amd64.deb -fi - -COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE \ - --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH \ - --name telegraf --provides telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ." -$rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create RPM package -- aborting." - cleanup_exit 1 -fi -echo "RPM package created successfully." - -fpm -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create Debian package -- aborting." - cleanup_exit 1 -fi -echo "Debian package created successfully." - -########################################################################### -# Offer to publish the packages. - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo -n "Publish packages to S3? [y/N] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xy" ]; then - echo "Publishing packages to S3." - if [ ! -e "$AWS_FILE" ]; then - echo "$AWS_FILE does not exist -- aborting." 
- cleanup_exit 1 - fi - - # Upload .deb and .rpm packages - for filepath in `ls *.{deb,rpm}`; do - echo "Uploading $filepath to S3" - filename=`basename $filepath` - echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename" - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath \ - s3://get.influxdb.org/telegraf/$filename \ - --acl public-read --region us-east-1 - if [ $? -ne 0 ]; then - echo "Upload failed -- aborting". - cleanup_exit 1 - fi - rm $filepath - done - - # Make and upload linux amd64, 386, and arm - make build-linux-bins - for b in `ls telegraf_*`; do - zippedbin=${b}_${VERSION}.tar.gz - # Zip the binary - tar -zcf $TMP_WORK_DIR/$zippedbin ./$b - echo "Uploading binary: $zippedbin to S3" - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \ - s3://get.influxdb.org/telegraf/$zippedbin \ - --acl public-read --region us-east-1 - if [ $? -ne 0 ]; then - echo "Binary upload failed -- aborting". - cleanup_exit 1 - fi - done - else - echo "Not publishing packages to S3." - fi -fi - -########################################################################### -# All done. - -echo -e "\nPackaging process complete." -cleanup_exit 0 From 71f4e72b22a40d4a6b9ec509b0b9bff0d66fb86e Mon Sep 17 00:00:00 2001 From: Kostas Botsas Date: Fri, 15 Jan 2016 14:48:45 -0800 Subject: [PATCH 073/103] interval options should have string value also mentioned name_override and name_prefix on top of name_suffix --- plugins/inputs/exec/README.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 29203f9a9..0445a2cdd 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -10,7 +10,7 @@ setup the exec plugin with: [[inputs.exec]] command = "/usr/bin/mycollector --output=json" name_suffix = "_mycollector" - interval = 10 + interval = "10s" ``` The name suffix is appended to exec as "exec_name_suffix" to identify the input stream. 
@@ -27,14 +27,21 @@ Let's say that we have a command with the name_suffix "_mycollector", which give { "a": 0.5, "b": { - "c": "some text", - "d": 0.1, - "e": 5 + "c": 0.1, + "d": 5 } } ``` The collected metrics will be stored as field values under the same measurement "exec_mycollector": ``` - exec_mycollector a=0.5,b_c="some text",b_d=0.1,b_e=5 1452815002357578567 + exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567 +``` + +Other options include: + +Other options for modifying the measurement names are: +``` +name_override = "newname" +name_prefix = "prefix_" ``` From 1421bce371a1d86afb9adc08c42aed24b421c8f0 Mon Sep 17 00:00:00 2001 From: Kostas Botsas Date: Fri, 15 Jan 2016 14:49:53 -0800 Subject: [PATCH 074/103] Update README.md --- plugins/inputs/exec/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 0445a2cdd..bd78f0b3c 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -38,8 +38,6 @@ The collected metrics will be stored as field values under the same measurement exec_mycollector a=0.5,b_c=0.1,b_d=5 1452815002357578567 ``` -Other options include: - Other options for modifying the measurement names are: ``` name_override = "newname" From 40a5bad9688c3c434027894a58d7aed082194db9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 15:55:41 -0700 Subject: [PATCH 075/103] Update procstat doc --- plugins/inputs/procstat/README.md | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index d2322ab1f..0c37af509 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -16,25 +16,19 @@ individual process specific measurements. 
Example: ``` - [procstat] +[[inputs.procstat]] + exe = "influxd" + prefix = "influxd" - [[procstat.specifications]] - exe = "influxd" - prefix = "influxd" - - [[procstat.specifications]] - pid_file = "/var/run/lxc/dnsmasq.pid" +[[inputs.procstat]] + pid_file = "/var/run/lxc/dnsmasq.pid" ``` The above configuration would result in output like: ``` -[...] -> [name="dnsmasq" pid="44979"] procstat_cpu_user value=0.14 -> [name="dnsmasq" pid="44979"] procstat_cpu_system value=0.07 -[...] -> [name="influxd" pid="34337"] procstat_influxd_cpu_user value=25.43 -> [name="influxd" pid="34337"] procstat_influxd_cpu_system value=21.82 +> procstat,name="dnsmasq",pid="44979" cpu_user=0.14,cpu_system=0.07 +> procstat,name="influxd",pid="34337" influxd_cpu_user=25.43,influxd_cpu_system=21.82 ``` # Measurements From c483e16d727d5179996ca7839c27d9a63635212e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 15:38:32 -0700 Subject: [PATCH 076/103] Add option to disable statsd name conversion closes #467 closes #532 --- CHANGELOG.md | 1 + plugins/inputs/statsd/statsd.go | 14 +++++-- plugins/inputs/statsd/statsd_test.go | 58 ++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa7f9607a..18a68ebf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! - [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454! +- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion. ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index d210b55fa..a4b70ffe3 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -35,6 +35,7 @@ type Statsd struct { DeleteCounters bool DeleteSets bool DeleteTimings bool + ConvertNames bool sync.Mutex @@ -63,6 +64,8 @@ func NewStatsd() *Statsd { s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.ConvertNames = true + return &s } @@ -121,6 +124,9 @@ const sampleConfig = ` # Percentiles to calculate for timing & histogram stats percentiles = [90] + # convert measurement names, "." to "_" and "-" to "__" + convert_names = true + # templates = [ # "cpu.* measurement*" # ] @@ -389,8 +395,10 @@ func (s *Statsd) parseName(bucket string) (string, map[string]string) { if err == nil { name, tags, _, _ = p.ApplyTemplate(name) } - name = strings.Replace(name, ".", "_", -1) - name = strings.Replace(name, "-", "__", -1) + if s.ConvertNames { + name = strings.Replace(name, ".", "_", -1) + name = strings.Replace(name, "-", "__", -1) + } return name, tags } @@ -491,6 +499,6 @@ func (s *Statsd) Stop() { func init() { inputs.Add("statsd", func() inputs.Input { - return &Statsd{} + return &Statsd{ConvertNames: true} }) } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 4a97728f2..a8aae2e9e 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -303,6 +303,64 @@ func TestParse_Tags(t *testing.T) { } } +// Test that statsd buckets are parsed to measurement names properly +func TestParseName(t *testing.T) { + s := NewStatsd() + + tests := []struct { + in_name string + out_name string + }{ + { + "foobar", + "foobar", + }, + { + "foo.bar", + "foo_bar", + }, + { + "foo.bar-baz", + "foo_bar__baz", + }, + } + + for _, test := range tests { + name, _ := s.parseName(test.in_name) + if name != test.out_name { + t.Errorf("Expected: %s, got %s", test.out_name, name) + } 
+ } + + // Test with ConvertNames = false + s.ConvertNames = false + + tests = []struct { + in_name string + out_name string + }{ + { + "foobar", + "foobar", + }, + { + "foo.bar", + "foo.bar", + }, + { + "foo.bar-baz", + "foo.bar-baz", + }, + } + + for _, test := range tests { + name, _ := s.parseName(test.in_name) + if name != test.out_name { + t.Errorf("Expected: %s, got %s", test.out_name, name) + } + } +} + // Test that measurements with the same name, but different tags, are treated // as different outputs func TestParse_MeasurementsWithSameName(t *testing.T) { From dbbb2d9877fee44678a674be44d59202004dafce Mon Sep 17 00:00:00 2001 From: Jeff Nickoloff Date: Thu, 7 Jan 2016 09:03:21 -0700 Subject: [PATCH 077/103] NSQ Plugin - Polls a set of NSQD REST endpoints and collects counters for all topics, channels, and clients Signed-off-by: Jeff Nickoloff closes #492 --- plugins/inputs/nsq/nsq.go | 260 +++++++++++++++++++++++++++++++++ plugins/inputs/nsq/nsq_test.go | 202 +++++++++++++++++++++++++ 2 files changed, 462 insertions(+) create mode 100644 plugins/inputs/nsq/nsq.go create mode 100644 plugins/inputs/nsq/nsq_test.go diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go new file mode 100644 index 000000000..678ea8be7 --- /dev/null +++ b/plugins/inputs/nsq/nsq.go @@ -0,0 +1,260 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Jeff Nickoloff (jeff@allingeek.com) +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the 
Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package nsq + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/influxdb/telegraf/plugins" +) + +// Might add Lookupd endpoints for cluster discovery +type NSQ struct { + Endpoints []string +} + +var sampleConfig = ` + # An array of NSQD HTTP API endpoints + endpoints = ["http://localhost:4151","http://otherhost:4151"] +` + +const ( + requestPattern = `%s/stats?format=json` +) + +func init() { + plugins.Add("nsq", func() plugins.Plugin { + return &NSQ{} + }) +} + +func (n *NSQ) SampleConfig() string { + return sampleConfig +} + +func (n *NSQ) Description() string { + return "Read NSQ topic and channel statistics." 
+} + +func (n *NSQ) Gather(acc plugins.Accumulator) error { + var wg sync.WaitGroup + var outerr error + + for _, e := range n.Endpoints { + wg.Add(1) + go func(e string) { + defer wg.Done() + outerr = n.gatherEndpoint(e, acc) + }(e) + } + + wg.Wait() + + return outerr +} + +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{Transport: tr} + +func (n *NSQ) gatherEndpoint(e string, acc plugins.Accumulator) error { + u, err := buildURL(e) + if err != nil { + return err + } + r, err := client.Get(u.String()) + if err != nil { + return fmt.Errorf("Error while polling %s: %s", u.String(), err) + } + defer r.Body.Close() + + if r.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) + } + + s := &NSQStats{} + err = json.NewDecoder(r.Body).Decode(s) + if err != nil { + return fmt.Errorf(`Error parsing response: %s`, err) + } + + tags := map[string]string{ + `server_host`: u.Host, + `server_version`: s.Data.Version, + } + + if s.Data.Health == `OK` { + acc.Add(`nsq_server_count`, int64(1), tags) + } else { + acc.Add(`nsq_server_count`, int64(0), tags) + } + + acc.Add(`nsq_server_topic_count`, int64(len(s.Data.Topics)), tags) + for _, t := range s.Data.Topics { + topicStats(t, acc, u.Host, s.Data.Version) + } + + return nil +} + +func buildURL(e string) (*url.URL, error) { + u := fmt.Sprintf(requestPattern, e) + addr, err := url.Parse(u) + if err != nil { + return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + } + return addr, nil +} + +func topicStats(t TopicStats, acc plugins.Accumulator, host, version string) { + + // per topic overall (tag: name, paused, channel count) + tags := map[string]string{ + `server_host`: host, + `server_version`: version, + `topic`: t.Name, + } + + acc.Add(`nsq_topic_depth`, t.Depth, tags) + acc.Add(`nsq_topic_backend_depth`, t.BackendDepth, tags) + acc.Add(`nsq_topic_message_count`, t.MessageCount, tags) + + 
acc.Add(`nsq_topic_channel_count`, int64(len(t.Channels)), tags) + for _, c := range t.Channels { + channelStats(c, acc, host, version, t.Name) + } +} + +func channelStats(c ChannelStats, acc plugins.Accumulator, host, version, topic string) { + tags := map[string]string{ + `server_host`: host, + `server_version`: version, + `topic`: topic, + `channel`: c.Name, + } + + acc.Add("nsq_channel_depth", c.Depth, tags) + acc.Add("nsq_channel_backend_depth", c.BackendDepth, tags) + acc.Add("nsq_channel_inflight_count", c.InFlightCount, tags) + acc.Add("nsq_channel_deferred_count", c.DeferredCount, tags) + acc.Add("nsq_channel_message_count", c.MessageCount, tags) + acc.Add("nsq_channel_requeue_count", c.RequeueCount, tags) + acc.Add("nsq_channel_timeout_count", c.TimeoutCount, tags) + + acc.Add("nsq_channel_client_count", int64(len(c.Clients)), tags) + for _, cl := range c.Clients { + clientStats(cl, acc, host, version, topic, c.Name) + } +} + +func clientStats(c ClientStats, acc plugins.Accumulator, host, version, topic, channel string) { + tags := map[string]string{ + `server_host`: host, + `server_version`: version, + `topic`: topic, + `channel`: channel, + `client_name`: c.Name, + `client_id`: c.ID, + `client_hostname`: c.Hostname, + `client_version`: c.Version, + `client_address`: c.RemoteAddress, + `client_user_agent`: c.UserAgent, + `client_tls`: strconv.FormatBool(c.TLS), + `client_snappy`: strconv.FormatBool(c.Snappy), + `client_deflate`: strconv.FormatBool(c.Deflate), + } + acc.Add("nsq_client_ready_count", c.ReadyCount, tags) + acc.Add("nsq_client_inflight_count", c.InFlightCount, tags) + acc.Add("nsq_client_message_count", c.MessageCount, tags) + acc.Add("nsq_client_finish_count", c.FinishCount, tags) + acc.Add("nsq_client_requeue_count", c.RequeueCount, tags) +} + +type NSQStats struct { + Code int64 `json:"status_code"` + Txt string `json:"status_txt"` + Data NSQStatsData `json:"data"` +} + +type NSQStatsData struct { + Version string `json:"version"` + 
Health string `json:"health"` + StartTime int64 `json:"start_time"` + Topics []TopicStats `json:"topics"` +} + +// e2e_processing_latency is not modeled +type TopicStats struct { + Name string `json:"topic_name"` + Depth int64 `json:"depth"` + BackendDepth int64 `json:"backend_depth"` + MessageCount int64 `json:"message_count"` + Paused bool `json:"paused"` + Channels []ChannelStats `json:"channels"` +} + +// e2e_processing_latency is not modeled +type ChannelStats struct { + Name string `json:"channel_name"` + Depth int64 `json:"depth"` + BackendDepth int64 `json:"backend_depth"` + InFlightCount int64 `json:"in_flight_count"` + DeferredCount int64 `json:"deferred_count"` + MessageCount int64 `json:"message_count"` + RequeueCount int64 `json:"requeue_count"` + TimeoutCount int64 `json:"timeout_count"` + Paused bool `json:"paused"` + Clients []ClientStats `json:"clients"` +} + +type ClientStats struct { + Name string `json:"name"` + ID string `json:"client_id"` + Hostname string `json:"hostname"` + Version string `json:"version"` + RemoteAddress string `json:"remote_address"` + State int64 `json:"state"` + ReadyCount int64 `json:"ready_count"` + InFlightCount int64 `json:"in_flight_count"` + MessageCount int64 `json:"message_count"` + FinishCount int64 `json:"finish_count"` + RequeueCount int64 `json:"requeue_count"` + ConnectTime int64 `json:"connect_ts"` + SampleRate int64 `json:"sample_rate"` + Deflate bool `json:"deflate"` + Snappy bool `json:"snappy"` + UserAgent string `json:"user_agent"` + TLS bool `json:"tls"` + TLSCipherSuite string `json:"tls_cipher_suite"` + TLSVersion string `json:"tls_version"` + TLSNegotiatedProtocol string `json:"tls_negotiated_protocol"` + TLSNegotiatedProtocolIsMutual bool `json:"tls_negotiated_protocol_is_mutual"` +} diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go new file mode 100644 index 000000000..44a205c08 --- /dev/null +++ b/plugins/inputs/nsq/nsq_test.go @@ -0,0 +1,202 @@ +package nsq + +import ( 
+ "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdb/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNSQStats(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + n := &NSQ{ + Endpoints: []string{ts.URL}, + } + + var acc testutil.Accumulator + err := n.Gather(&acc) + require.NoError(t, err) + + u, err := url.Parse(ts.URL) + require.NoError(t, err) + host := u.Host + + // actually validate the tests + tests := []struct { + m string + v int64 + g map[string]string + }{ + {`nsq_server_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`}}, + {`nsq_server_topic_count`, int64(2), map[string]string{`server_host`: host, `server_version`: `0.3.6`}}, + {`nsq_topic_depth`, int64(12), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, + {`nsq_topic_backend_depth`, int64(13), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, + {`nsq_topic_message_count`, int64(14), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, + {`nsq_topic_channel_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, + {`nsq_channel_depth`, int64(0), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_backend_depth`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_inflight_count`, int64(2), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_deferred_count`, int64(3), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + 
{`nsq_channel_message_count`, int64(4), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_requeue_count`, int64(5), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_timeout_count`, int64(6), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_channel_client_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, + {`nsq_client_ready_count`, int64(200), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, + {`nsq_client_inflight_count`, int64(7), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, + {`nsq_client_message_count`, int64(8), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, + {`nsq_client_finish_count`, int64(9), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, 
`client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, + {`nsq_client_requeue_count`, int64(10), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, + {`nsq_topic_depth`, int64(28), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, + {`nsq_topic_backend_depth`, int64(29), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, + {`nsq_topic_message_count`, int64(30), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, + {`nsq_topic_channel_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, + {`nsq_channel_depth`, int64(15), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_backend_depth`, int64(16), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_inflight_count`, int64(17), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_deferred_count`, int64(18), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_message_count`, int64(19), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_requeue_count`, int64(20), map[string]string{`server_host`: host, 
`server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_timeout_count`, int64(21), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_channel_client_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, + {`nsq_client_ready_count`, int64(22), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + {`nsq_client_inflight_count`, int64(23), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + {`nsq_client_message_count`, int64(24), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + {`nsq_client_finish_count`, int64(25), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + 
{`nsq_client_requeue_count`, int64(26), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + } + + for _, test := range tests { + assert.True(t, acc.CheckTaggedValue(test.m, test.v, test.g), "Failed expectation: (\"%v\", \"%v\", \"%v\")", test.m, test.v, fmt.Sprint(test.g)) + } +} + +var response = ` +{ + "status_code": 200, + "status_txt": "OK", + "data": { + "version": "0.3.6", + "health": "OK", + "start_time": 1452021674, + "topics": [ + { + "topic_name": "t1", + "channels": [ + { + "channel_name": "c1", + "depth": 0, + "backend_depth": 1, + "in_flight_count": 2, + "deferred_count": 3, + "message_count": 4, + "requeue_count": 5, + "timeout_count": 6, + "clients": [ + { + "name": "373a715cd990", + "client_id": "373a715cd990", + "hostname": "373a715cd990", + "version": "V2", + "remote_address": "172.17.0.11:35560", + "state": 3, + "ready_count": 200, + "in_flight_count": 7, + "message_count": 8, + "finish_count": 9, + "requeue_count": 10, + "connect_ts": 1452021675, + "sample_rate": 11, + "deflate": false, + "snappy": false, + "user_agent": "nsq_to_nsq\/0.3.6 go-nsq\/1.0.5", + "tls": false, + "tls_cipher_suite": "", + "tls_version": "", + "tls_negotiated_protocol": "", + "tls_negotiated_protocol_is_mutual": false + } + ], + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ], + "depth": 12, + "backend_depth": 13, + "message_count": 14, + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + }, + { + "topic_name": "t2", + "channels": [ + { + "channel_name": "c2", + "depth": 15, + "backend_depth": 16, + "in_flight_count": 17, + "deferred_count": 18, + "message_count": 19, + 
"requeue_count": 20, + "timeout_count": 21, + "clients": [ + { + "name": "377569bd462b", + "client_id": "377569bd462b", + "hostname": "377569bd462b", + "version": "V2", + "remote_address": "172.17.0.8:48145", + "state": 3, + "ready_count": 22, + "in_flight_count": 23, + "message_count": 24, + "finish_count": 25, + "requeue_count": 26, + "connect_ts": 1452021678, + "sample_rate": 27, + "deflate": true, + "snappy": true, + "user_agent": "go-nsq\/1.0.5", + "tls": true, + "tls_cipher_suite": "", + "tls_version": "", + "tls_negotiated_protocol": "", + "tls_negotiated_protocol_is_mutual": false + } + ], + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ], + "depth": 28, + "backend_depth": 29, + "message_count": 30, + "paused": false, + "e2e_processing_latency": { + "count": 0, + "percentiles": null + } + } + ] + } +} +` From 6eea89f4c0d9786be05ef3751d6541d028122d5f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 15 Jan 2016 16:27:24 -0700 Subject: [PATCH 078/103] Make NSQ plugin compatible with version 0.10.0 --- CHANGELOG.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/nsq/nsq.go | 109 +++++++++++++----------- plugins/inputs/nsq/nsq_test.go | 149 ++++++++++++++++++++++++--------- 4 files changed, 172 insertions(+), 88 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18a68ebf7..0be1ec417 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! - [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454! - [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion. +- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek! 
### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b6b1e74da..b4c8553c3 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -19,6 +19,7 @@ import ( _ "github.com/influxdb/telegraf/plugins/inputs/mongodb" _ "github.com/influxdb/telegraf/plugins/inputs/mysql" _ "github.com/influxdb/telegraf/plugins/inputs/nginx" + _ "github.com/influxdb/telegraf/plugins/inputs/nsq" _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdb/telegraf/plugins/inputs/ping" _ "github.com/influxdb/telegraf/plugins/inputs/postgresql" diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 678ea8be7..48a709a37 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -31,7 +31,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins" + "github.com/influxdb/telegraf/plugins/inputs" ) // Might add Lookupd endpoints for cluster discovery @@ -41,7 +41,7 @@ type NSQ struct { var sampleConfig = ` # An array of NSQD HTTP API endpoints - endpoints = ["http://localhost:4151","http://otherhost:4151"] + endpoints = ["http://localhost:4151"] ` const ( @@ -49,7 +49,7 @@ const ( ) func init() { - plugins.Add("nsq", func() plugins.Plugin { + inputs.Add("nsq", func() inputs.Input { return &NSQ{} }) } @@ -62,7 +62,7 @@ func (n *NSQ) Description() string { return "Read NSQ topic and channel statistics." 
} -func (n *NSQ) Gather(acc plugins.Accumulator) error { +func (n *NSQ) Gather(acc inputs.Accumulator) error { var wg sync.WaitGroup var outerr error @@ -85,7 +85,7 @@ var tr = &http.Transport{ var client = &http.Client{Transport: tr} -func (n *NSQ) gatherEndpoint(e string, acc plugins.Accumulator) error { +func (n *NSQ) gatherEndpoint(e string, acc inputs.Accumulator) error { u, err := buildURL(e) if err != nil { return err @@ -111,13 +111,15 @@ func (n *NSQ) gatherEndpoint(e string, acc plugins.Accumulator) error { `server_version`: s.Data.Version, } + fields := make(map[string]interface{}) if s.Data.Health == `OK` { - acc.Add(`nsq_server_count`, int64(1), tags) + fields["server_count"] = int64(1) } else { - acc.Add(`nsq_server_count`, int64(0), tags) + fields["server_count"] = int64(0) } + fields["topic_count"] = int64(len(s.Data.Topics)) - acc.Add(`nsq_server_topic_count`, int64(len(s.Data.Topics)), tags) + acc.AddFields("nsq_server", fields, tags) for _, t := range s.Data.Topics { topicStats(t, acc, u.Host, s.Data.Version) } @@ -134,68 +136,77 @@ func buildURL(e string) (*url.URL, error) { return addr, nil } -func topicStats(t TopicStats, acc plugins.Accumulator, host, version string) { - +func topicStats(t TopicStats, acc inputs.Accumulator, host, version string) { // per topic overall (tag: name, paused, channel count) tags := map[string]string{ - `server_host`: host, - `server_version`: version, - `topic`: t.Name, + "server_host": host, + "server_version": version, + "topic": t.Name, } - acc.Add(`nsq_topic_depth`, t.Depth, tags) - acc.Add(`nsq_topic_backend_depth`, t.BackendDepth, tags) - acc.Add(`nsq_topic_message_count`, t.MessageCount, tags) + fields := map[string]interface{}{ + "depth": t.Depth, + "backend_depth": t.BackendDepth, + "message_count": t.MessageCount, + "channel_count": int64(len(t.Channels)), + } + acc.AddFields("nsq_topic", fields, tags) - acc.Add(`nsq_topic_channel_count`, int64(len(t.Channels)), tags) for _, c := range t.Channels { 
channelStats(c, acc, host, version, t.Name) } } -func channelStats(c ChannelStats, acc plugins.Accumulator, host, version, topic string) { +func channelStats(c ChannelStats, acc inputs.Accumulator, host, version, topic string) { tags := map[string]string{ - `server_host`: host, - `server_version`: version, - `topic`: topic, - `channel`: c.Name, + "server_host": host, + "server_version": version, + "topic": topic, + "channel": c.Name, } - acc.Add("nsq_channel_depth", c.Depth, tags) - acc.Add("nsq_channel_backend_depth", c.BackendDepth, tags) - acc.Add("nsq_channel_inflight_count", c.InFlightCount, tags) - acc.Add("nsq_channel_deferred_count", c.DeferredCount, tags) - acc.Add("nsq_channel_message_count", c.MessageCount, tags) - acc.Add("nsq_channel_requeue_count", c.RequeueCount, tags) - acc.Add("nsq_channel_timeout_count", c.TimeoutCount, tags) + fields := map[string]interface{}{ + "depth": c.Depth, + "backend_depth": c.BackendDepth, + "inflight_count": c.InFlightCount, + "deferred_count": c.DeferredCount, + "message_count": c.MessageCount, + "requeue_count": c.RequeueCount, + "timeout_count": c.TimeoutCount, + "client_count": int64(len(c.Clients)), + } - acc.Add("nsq_channel_client_count", int64(len(c.Clients)), tags) + acc.AddFields("nsq_channel", fields, tags) for _, cl := range c.Clients { clientStats(cl, acc, host, version, topic, c.Name) } } -func clientStats(c ClientStats, acc plugins.Accumulator, host, version, topic, channel string) { +func clientStats(c ClientStats, acc inputs.Accumulator, host, version, topic, channel string) { tags := map[string]string{ - `server_host`: host, - `server_version`: version, - `topic`: topic, - `channel`: channel, - `client_name`: c.Name, - `client_id`: c.ID, - `client_hostname`: c.Hostname, - `client_version`: c.Version, - `client_address`: c.RemoteAddress, - `client_user_agent`: c.UserAgent, - `client_tls`: strconv.FormatBool(c.TLS), - `client_snappy`: strconv.FormatBool(c.Snappy), - `client_deflate`: 
strconv.FormatBool(c.Deflate), + "server_host": host, + "server_version": version, + "topic": topic, + "channel": channel, + "client_name": c.Name, + "client_id": c.ID, + "client_hostname": c.Hostname, + "client_version": c.Version, + "client_address": c.RemoteAddress, + "client_user_agent": c.UserAgent, + "client_tls": strconv.FormatBool(c.TLS), + "client_snappy": strconv.FormatBool(c.Snappy), + "client_deflate": strconv.FormatBool(c.Deflate), } - acc.Add("nsq_client_ready_count", c.ReadyCount, tags) - acc.Add("nsq_client_inflight_count", c.InFlightCount, tags) - acc.Add("nsq_client_message_count", c.MessageCount, tags) - acc.Add("nsq_client_finish_count", c.FinishCount, tags) - acc.Add("nsq_client_requeue_count", c.RequeueCount, tags) + + fields := map[string]interface{}{ + "ready_count": c.ReadyCount, + "inflight_count": c.InFlightCount, + "message_count": c.MessageCount, + "finish_count": c.FinishCount, + "requeue_count": c.RequeueCount, + } + acc.AddFields("nsq_client", fields, tags) } type NSQStats struct { diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 44a205c08..fc34a710b 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -9,7 +9,6 @@ import ( "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,49 +34,121 @@ func TestNSQStats(t *testing.T) { // actually validate the tests tests := []struct { m string - v int64 + f map[string]interface{} g map[string]string }{ - {`nsq_server_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`}}, - {`nsq_server_topic_count`, int64(2), map[string]string{`server_host`: host, `server_version`: `0.3.6`}}, - {`nsq_topic_depth`, int64(12), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, - {`nsq_topic_backend_depth`, int64(13), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, - 
{`nsq_topic_message_count`, int64(14), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, - {`nsq_topic_channel_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`}}, - {`nsq_channel_depth`, int64(0), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_backend_depth`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_inflight_count`, int64(2), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_deferred_count`, int64(3), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_message_count`, int64(4), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_requeue_count`, int64(5), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_timeout_count`, int64(6), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_channel_client_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`}}, - {`nsq_client_ready_count`, int64(200), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, - {`nsq_client_inflight_count`, int64(7), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: 
`373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, - {`nsq_client_message_count`, int64(8), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, - {`nsq_client_finish_count`, int64(9), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, - {`nsq_client_requeue_count`, int64(10), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t1`, `channel`: `c1`, `client_name`: `373a715cd990`, `client_id`: `373a715cd990`, `client_hostname`: `373a715cd990`, `client_version`: `V2`, `client_address`: `172.17.0.11:35560`, `client_tls`: `false`, `client_snappy`: `false`, `client_deflate`: `false`, `client_user_agent`: `nsq_to_nsq/0.3.6 go-nsq/1.0.5`}}, - {`nsq_topic_depth`, int64(28), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, - {`nsq_topic_backend_depth`, int64(29), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, - {`nsq_topic_message_count`, int64(30), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`}}, - {`nsq_topic_channel_count`, int64(1), map[string]string{`server_host`: host, `server_version`: 
`0.3.6`, `topic`: `t2`}}, - {`nsq_channel_depth`, int64(15), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_backend_depth`, int64(16), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_inflight_count`, int64(17), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_deferred_count`, int64(18), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_message_count`, int64(19), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_requeue_count`, int64(20), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_timeout_count`, int64(21), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_channel_client_count`, int64(1), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`}}, - {`nsq_client_ready_count`, int64(22), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, - {`nsq_client_inflight_count`, int64(23), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, - 
{`nsq_client_message_count`, int64(24), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, - {`nsq_client_finish_count`, int64(25), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, - {`nsq_client_requeue_count`, int64(26), map[string]string{`server_host`: host, `server_version`: `0.3.6`, `topic`: `t2`, `channel`: `c2`, `client_name`: `377569bd462b`, `client_id`: `377569bd462b`, `client_hostname`: `377569bd462b`, `client_version`: `V2`, `client_address`: `172.17.0.8:48145`, `client_user_agent`: `go-nsq/1.0.5`, `client_tls`: `true`, `client_snappy`: `true`, `client_deflate`: `true`}}, + { + "nsq_server", + map[string]interface{}{ + "server_count": int64(1), + "topic_count": int64(2), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + }, + }, + { + "nsq_topic", + map[string]interface{}{ + "depth": int64(12), + "backend_depth": int64(13), + "message_count": int64(14), + "channel_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t1"}, + }, + { + "nsq_channel", + map[string]interface{}{ + "depth": int64(0), + "backend_depth": int64(1), + "inflight_count": int64(2), + "deferred_count": int64(3), + "message_count": int64(4), + "requeue_count": int64(5), + "timeout_count": int64(6), + "client_count": int64(1), + }, + map[string]string{ + "server_host": host, + 
"server_version": "0.3.6", + "topic": "t1", + "channel": "c1", + }, + }, + { + "nsq_client", + map[string]interface{}{ + "ready_count": int64(200), + "inflight_count": int64(7), + "message_count": int64(8), + "finish_count": int64(9), + "requeue_count": int64(10), + }, + map[string]string{"server_host": host, "server_version": "0.3.6", + "topic": "t1", "channel": "c1", "client_name": "373a715cd990", + "client_id": "373a715cd990", "client_hostname": "373a715cd990", + "client_version": "V2", "client_address": "172.17.0.11:35560", + "client_tls": "false", "client_snappy": "false", + "client_deflate": "false", + "client_user_agent": "nsq_to_nsq/0.3.6 go-nsq/1.0.5"}, + }, + { + "nsq_topic", + map[string]interface{}{ + "depth": int64(28), + "backend_depth": int64(29), + "message_count": int64(30), + "channel_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t2"}, + }, + { + "nsq_channel", + map[string]interface{}{ + "depth": int64(15), + "backend_depth": int64(16), + "inflight_count": int64(17), + "deferred_count": int64(18), + "message_count": int64(19), + "requeue_count": int64(20), + "timeout_count": int64(21), + "client_count": int64(1), + }, + map[string]string{ + "server_host": host, + "server_version": "0.3.6", + "topic": "t2", + "channel": "c2", + }, + }, + { + "nsq_client", + map[string]interface{}{ + "ready_count": int64(22), + "inflight_count": int64(23), + "message_count": int64(24), + "finish_count": int64(25), + "requeue_count": int64(26), + }, + map[string]string{"server_host": host, "server_version": "0.3.6", + "topic": "t2", "channel": "c2", "client_name": "377569bd462b", + "client_id": "377569bd462b", "client_hostname": "377569bd462b", + "client_version": "V2", "client_address": "172.17.0.8:48145", + "client_user_agent": "go-nsq/1.0.5", "client_tls": "true", + "client_snappy": "true", "client_deflate": "true"}, + }, } for _, test := range tests { - assert.True(t, acc.CheckTaggedValue(test.m, 
test.v, test.g), "Failed expectation: (\"%v\", \"%v\", \"%v\")", test.m, test.v, fmt.Sprint(test.g)) + acc.AssertContainsTaggedFields(t, test.m, test.f, test.g) } } From ef5c630d3a3691b20f3da42e703f5843236bb8bc Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Thu, 7 Jan 2016 19:26:33 -0500 Subject: [PATCH 079/103] Add Graphite output --- plugins/outputs/all/all.go | 1 + plugins/outputs/graphite/README.md | 13 +++ plugins/outputs/graphite/graphite.go | 134 ++++++++++++++++++++++ plugins/outputs/graphite/graphite_test.go | 104 +++++++++++++++++ 4 files changed, 252 insertions(+) create mode 100644 plugins/outputs/graphite/README.md create mode 100644 plugins/outputs/graphite/graphite.go create mode 100644 plugins/outputs/graphite/graphite_test.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 8a0d24f94..7eedb592a 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -4,6 +4,7 @@ import ( _ "github.com/influxdb/telegraf/plugins/outputs/amon" _ "github.com/influxdb/telegraf/plugins/outputs/amqp" _ "github.com/influxdb/telegraf/plugins/outputs/datadog" + _ "github.com/influxdb/telegraf/plugins/outputs/graphite" _ "github.com/influxdb/telegraf/plugins/outputs/influxdb" _ "github.com/influxdb/telegraf/plugins/outputs/kafka" _ "github.com/influxdb/telegraf/plugins/outputs/kinesis" diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md new file mode 100644 index 000000000..48313a886 --- /dev/null +++ b/plugins/outputs/graphite/README.md @@ -0,0 +1,13 @@ +# Graphite Output Plugin + +This plugin writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP. + +Parameters: + + Servers []string + Prefix string + Timeout int + +* `servers`: List of strings, ["mygraphiteserver:2003"]. +* `prefix`: String use to prefix all sent metrics. +* `timeout`: Connection timeout in second. 
diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go new file mode 100644 index 000000000..405b1fe7a --- /dev/null +++ b/plugins/outputs/graphite/graphite.go @@ -0,0 +1,134 @@ +package graphite + +import ( + "errors" + "fmt" + "github.com/influxdb/influxdb/client/v2" + "github.com/influxdb/telegraf/plugins/outputs" + "log" + "math/rand" + "net" + "strings" + "time" +) + +type Graphite struct { + // URL is only for backwards compatability + Servers []string + Prefix string + Timeout int + conns []net.Conn +} + +var sampleConfig = ` + # TCP raw endpoint for your graphite instance. + servers = ["mygraphiteserver:2003"] # default "localhost:2003" + # Prefix metrics name + prefix = "" # default "" + # Connection timeout in second (for the connection with Carbon(Graphite)) + timeout = 2 # default 2s +` + +func (g *Graphite) Connect() error { + // Set default values + if g.Timeout <= 0 { + g.Timeout = 2 + } + if len(g.Servers) == 0 { + g.Servers = append(g.Servers, "localhost:2003") + } + // Get Connections + var conns []net.Conn + for _, server := range g.Servers { + conn, err := net.DialTimeout("tcp", server, time.Duration(g.Timeout)*time.Second) + if err == nil { + conns = append(conns, conn) + } + } + g.conns = conns + return nil +} + +func (g *Graphite) Close() error { + // Closing all connections + for _, conn := range g.conns { + conn.Close() + } + return nil +} + +func (g *Graphite) SampleConfig() string { + return sampleConfig +} + +func (g *Graphite) Description() string { + return "Configuration for Graphite server to send metrics to using TCP raw protocol" +} + +// Choose a random server in the cluster to write to until a successful write +// occurs, logging each unsuccessful. If all servers fail, return error. 
+func (g *Graphite) Write(points []*client.Point) error { + // Prepare data + var bp []string + for _, point := range points { + // Get name + name := point.Name() + // Convert UnixNano to Unix timestamps + timestamp := point.UnixNano() / 1000000000 + + for field_name, value := range point.Fields() { + // Convert value + value_str := fmt.Sprintf("%#v", value) + // Write graphite point + var graphitePoint string + if name == field_name { + graphitePoint = fmt.Sprintf("%s.%s %s %d\n", + strings.Replace(point.Tags()["host"], ".", "_", -1), + strings.Replace(name, ".", "_", -1), + value_str, + timestamp) + } else { + graphitePoint = fmt.Sprintf("%s.%s.%s %s %d\n", + strings.Replace(point.Tags()["host"], ".", "_", -1), + strings.Replace(name, ".", "_", -1), + strings.Replace(field_name, ".", "_", -1), + value_str, + timestamp) + } + if g.Prefix != "" { + graphitePoint = fmt.Sprintf("%s.%s", g.Prefix, graphitePoint) + } + bp = append(bp, graphitePoint) + //fmt.Printf(graphitePoint) + } + } + graphitePoints := strings.Join(bp, "") + + // This will get set to nil if a successful write occurs + err := errors.New("Could not write to any Graphite server in cluster\n") + + // Send data to a random server + p := rand.Perm(len(g.conns)) + for _, n := range p { + if _, e := fmt.Fprintf(g.conns[n], graphitePoints); e != nil { + // Error + log.Println("ERROR: " + err.Error()) + // Let's try the next one + } else { + // Success + err = nil + break + } + } + // try to reconnect + if err != nil { + g.Connect() + } + return err +} + +func init() { + outputs.Add("graphite", func() outputs.Output { + return &Graphite{} + }) +} diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go new file mode 100644 index 000000000..e9000c3c7 --- /dev/null +++ b/plugins/outputs/graphite/graphite_test.go @@ -0,0 +1,104 @@ +package graphite + +import ( + "bufio" + "net" + "net/textproto" + "sync" + "testing" + "time" + + "github.com/influxdb/influxdb/client/v2" 
+ + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGraphiteError(t *testing.T) { + // Init plugin + g := Graphite{ + Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"}, + Prefix: "my.prefix", + } + // Init points + pt1, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"mymeasurement": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + // Prepare point list + var points []*client.Point + points = append(points, pt1) + // Error + err1 := g.Connect() + require.NoError(t, err1) + err2 := g.Write(points) + require.Error(t, err2) + assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error()) +} + +func TestGraphiteOK(t *testing.T) { + var wg sync.WaitGroup + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + } + // Init points + pt1, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"mymeasurement": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + pt2, _ := client.NewPoint( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + pt3, _ := client.NewPoint( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + // Prepare point list + var points []*client.Point + points = append(points, pt1) + points = append(points, pt2) + points = append(points, pt3) + // Start TCP server + wg.Add(1) + go TCPServer(t, &wg) + wg.Wait() + // Connect + wg.Add(1) + err1 := g.Connect() + wg.Wait() + require.NoError(t, err1) + // Send Data + err2 := g.Write(points) + require.NoError(t, err2) + wg.Add(1) + // Waiting TCPserver + wg.Wait() + g.Close() +} + +func TCPServer(t *testing.T, wg 
*sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + wg.Done() + conn, _ := tcpServer.Accept() + wg.Done() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data1) + data2, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.value 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.value 3.14 1289430000", data3) + conn.Close() + wg.Done() +} From 8cbdf0f9078767e207be267e702ec5904f1b3db3 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Sat, 16 Jan 2016 17:29:02 -0700 Subject: [PATCH 080/103] Tweak config messages for graphite. Update changelog and readme closes #494 --- CHANGELOG.md | 1 + README.md | 2 ++ plugins/outputs/graphite/graphite.go | 12 ++++++------ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0be1ec417..f985f210d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454! - [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion. - [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek! +- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! diff --git a/README.md b/README.md index 6b723787e..539895899 100644 --- a/README.md +++ b/README.md @@ -152,6 +152,7 @@ Currently implemented sources: * mongodb * mysql * nginx +* nsq * phpfpm * ping * postgresql @@ -188,6 +189,7 @@ want to add support for another service or third-party API. 
* amon * amqp * datadog +* graphite * kafka * amazon kinesis * librato diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 405b1fe7a..dd2af8eb1 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -21,12 +21,12 @@ type Graphite struct { } var sampleConfig = ` - # TCP raw endpoint for your graphite instance. - servers = ["mygraphiteserver:2003"] # default "localhost:2003" + # TCP endpoint for your graphite instance. + servers = ["localhost:2003"] # Prefix metrics name - prefix = "" # default "" - # Connection timeout in second (for the connection with Carbon(Graphite)) - timeout = 2 # default 2s + prefix = "" + # timeout in seconds for the write connection to graphite + timeout = 2 ` func (g *Graphite) Connect() error { @@ -62,7 +62,7 @@ func (g *Graphite) SampleConfig() string { } func (g *Graphite) Description() string { - return "Configuration for Graphite server to send metrics to using TCP raw protocol" + return "Configuration for Graphite server to send metrics to" } // Choose a random server in the cluster to write to until a successful write From 37b96c192b44e6e15c3e0a54c1fa67694d5e8981 Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Fri, 15 Jan 2016 15:35:43 +0300 Subject: [PATCH 081/103] output amqp: Add ssl support closes #536 --- CHANGELOG.md | 1 + plugins/outputs/amqp/amqp.go | 41 +++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f985f210d..d65b8c67c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion. - [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek! - [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert! +- AMQP SSL support. Thanks @ekini! 
### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 6f0e0fde3..e1d6302a1 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -2,7 +2,10 @@ package amqp import ( "bytes" + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "log" "sync" "time" @@ -17,6 +20,12 @@ type AMQP struct { URL string // AMQP exchange Exchange string + // path to CA file + SslCa string + // path to host cert file + SslCert string + // path to cert key file + SslKey string // Routing Key Tag RoutingTag string `toml:"routing_tag"` // InfluxDB database @@ -46,6 +55,11 @@ var sampleConfig = ` # ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" + # Use ssl + #ssl_ca = "/etc/telegraf/ca.pem" + #ssl_cert = "/etc/telegraf/cert.pem" + #ssl_key = "/etc/telegraf/key.pem" + # InfluxDB retention policy #retention_policy = "default" # InfluxDB database @@ -64,7 +78,32 @@ func (q *AMQP) Connect() error { "retention_policy": q.RetentionPolicy, } - connection, err := amqp.Dial(q.URL) + var connection *amqp.Connection + var err error + if q.SslCert != "" && q.SslKey != "" { + // make new tls config + cfg := new(tls.Config) + if q.SslCa != "" { + // create ca pool + cfg.RootCAs = x509.NewCertPool() + + // add self-signed cert + if ca, err := ioutil.ReadFile(q.SslCa); err == nil { + cfg.RootCAs.AppendCertsFromPEM(ca) + } else { + log.Println(err) + } + } + if cert, err := tls.LoadX509KeyPair(q.SslCert, q.SslKey); err == nil { + cfg.Certificates = append(cfg.Certificates, cert) + } else { + log.Println(err) + } + connection, err = amqp.DialTLS(q.URL, cfg) + + } else { + connection, err = amqp.Dial(q.URL) + } if err != nil { return err } From a712036b562409e47c91589c0f963d91d61bc7ff Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Fri, 15 Jan 2016 15:36:41 +0300 Subject: 
[PATCH 082/103] core: print error on output connect fail closes #537 --- agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent.go b/agent.go index 0c5d58db5..25fd46462 100644 --- a/agent.go +++ b/agent.go @@ -58,7 +58,7 @@ func (a *Agent) Connect() error { } err := o.Output.Connect() if err != nil { - log.Printf("Failed to connect to output %s, retrying in 15s\n", o.Name) + log.Printf("Failed to connect to output %s, retrying in 15s, error was '%s' \n", o.Name, err) time.Sleep(15 * time.Second) err = o.Output.Connect() if err != nil { From 5af697479659bc31e451dd5a0b22cd79486f8cbe Mon Sep 17 00:00:00 2001 From: Vinh Date: Thu, 14 Jan 2016 14:20:59 -0800 Subject: [PATCH 083/103] phpfpm plugin: enhance socket gathering and config - If we detect errors when gathering stat via socket, return those error so it canbe appear in Telegraf log - Improve fcgi client, also upgrade it to current version of Go at https://golang.org/src/net/http/fcgi/fcgi.go - Add test for unix socket and fcgi to remotely connect but only as an extra url field. - Allow customization of fpm status path - Document about using of `host` in case `unixsocket` that it isn't used - Documet upgrade for new data layout closes #499 closes #502 closes #538 --- plugins/inputs/phpfpm/README.md | 46 ++--- plugins/inputs/phpfpm/phpfpm.go | 138 +++++++++------ plugins/inputs/phpfpm/phpfpm_fcgi.go | 245 +++++++++++++++------------ plugins/inputs/phpfpm/phpfpm_test.go | 182 ++++++++++++++++++-- 4 files changed, 406 insertions(+), 205 deletions(-) diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index c2a42523a..b853b7fd7 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -6,10 +6,14 @@ Get phpfpm stat using either HTTP status page or fpm socket. 
Meta: -- tags: `url= pool=poolname` +- tags: `pool=poolname` Measurement names: +- phpfpm + +Measurement field: + - accepted_conn - listen_queue - max_listen_queue @@ -50,36 +54,12 @@ It produces: ``` * Plugin: phpfpm, Collection 1 -> [url="10.0.0.12" pool="www"] phpfpm_idle_processes value=1 -> [url="10.0.0.12" pool="www"] phpfpm_total_processes value=2 -> [url="10.0.0.12" pool="www"] phpfpm_max_children_reached value=0 -> [url="10.0.0.12" pool="www"] phpfpm_max_listen_queue value=0 -> [url="10.0.0.12" pool="www"] phpfpm_listen_queue value=0 -> [url="10.0.0.12" pool="www"] phpfpm_listen_queue_len value=0 -> [url="10.0.0.12" pool="www"] phpfpm_active_processes value=1 -> [url="10.0.0.12" pool="www"] phpfpm_max_active_processes value=2 -> [url="10.0.0.12" pool="www"] phpfpm_slow_requests value=0 -> [url="10.0.0.12" pool="www"] phpfpm_accepted_conn value=305 - -> [url="localhost" pool="www2"] phpfpm_max_children_reached value=0 -> [url="localhost" pool="www2"] phpfpm_slow_requests value=0 -> [url="localhost" pool="www2"] phpfpm_max_listen_queue value=0 -> [url="localhost" pool="www2"] phpfpm_active_processes value=1 -> [url="localhost" pool="www2"] phpfpm_listen_queue_len value=0 -> [url="localhost" pool="www2"] phpfpm_idle_processes value=1 -> [url="localhost" pool="www2"] phpfpm_total_processes value=2 -> [url="localhost" pool="www2"] phpfpm_max_active_processes value=2 -> [url="localhost" pool="www2"] phpfpm_accepted_conn value=306 -> [url="localhost" pool="www2"] phpfpm_listen_queue value=0 - -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_children_reached value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_slow_requests value=1 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_listen_queue value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_active_processes value=1 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue_len value=0 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_idle_processes value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_total_processes 
value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_max_active_processes value=2 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_accepted_conn value=307 -> [url="10.0.0.12:9000" pool="www3"] phpfpm_listen_queue value=0 +> phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187 +> phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422 +> phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658 ``` + +## Note + +When using `unixsocket`, you have to ensure that telegraf runs on same +host, and socket path is accessible to telegraf user. diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index ceffc673e..5600334b2 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "net/url" + "os" "strconv" "strings" "sync" @@ -40,20 +41,25 @@ type phpfpm struct { var sampleConfig = ` # An array of addresses to gather stats about. Specify an ip or hostname - # with optional port and path. 
+ # with optional port and path # - # Plugin can be configured in three modes (both can be used): - # - http: the URL must start with http:// or https://, ex: + # Plugin can be configured in three modes (either can be used): + # - http: the URL must start with http:// or https://, ie: # "http://localhost/status" # "http://192.168.130.1/status?full" - # - unixsocket: path to fpm socket, ex: + # + # - unixsocket: path to fpm socket, ie: # "/var/run/php5-fpm.sock" - # "192.168.10.10:/var/run/php5-fpm-www2.sock" - # - fcgi: the URL mush start with fcgi:// or cgi://, and port must present, ex: + # or using a custom fpm status path: + # "/var/run/php5-fpm.sock:fpm-custom-status-path" + # + # - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: # "fcgi://10.0.0.12:9000/status" # "cgi://10.0.10.12:9001/status" # - # If no servers are specified, then default to 127.0.0.1/server-status + # Example of multiple gathering from local socket and remove host + # urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] + # If no servers are specified, then default to http://127.0.0.1/status urls = ["http://localhost/status"] ` @@ -62,7 +68,7 @@ func (r *phpfpm) SampleConfig() string { } func (r *phpfpm) Description() string { - return "Read metrics of phpfpm, via HTTP status page or socket(pending)" + return "Read metrics of phpfpm, via HTTP status page or socket" } // Reads stats from all configured servers accumulates stats. 
@@ -89,71 +95,96 @@ func (g *phpfpm) Gather(acc inputs.Accumulator) error { return outerr } -// Request status page to get stat raw data +// Request status page to get stat raw data and import it func (g *phpfpm) gatherServer(addr string, acc inputs.Accumulator) error { if g.client == nil { - client := &http.Client{} g.client = client } if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { + return g.gatherHttp(addr, acc) + } + + var ( + fcgi *conn + socketPath string + statusPath string + ) + + if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("Unable parse server address '%s': %s", addr, err) } - - req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, - u.Host, u.Path), nil) - res, err := g.client.Do(req) - if err != nil { - return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v", - addr, err) - } - - if res.StatusCode != 200 { - return fmt.Errorf("Unable to get valid stat result from '%s': %v", - addr, err) - } - - importMetric(res.Body, acc, u.Host) + socketAddr := strings.Split(u.Host, ":") + fcgiIp := socketAddr[0] + fcgiPort, _ := strconv.Atoi(socketAddr[1]) + fcgi, _ = NewClient(fcgiIp, fcgiPort) } else { - var ( - fcgi *FCGIClient - fcgiAddr string - ) - if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { - u, err := url.Parse(addr) - if err != nil { - return fmt.Errorf("Unable parse server address '%s': %s", addr, err) - } - socketAddr := strings.Split(u.Host, ":") - fcgiIp := socketAddr[0] - fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgiAddr = u.Host - fcgi, _ = NewClient(fcgiIp, fcgiPort) + socketAddr := strings.Split(addr, ":") + if len(socketAddr) >= 2 { + socketPath = socketAddr[0] + statusPath = socketAddr[1] } else { - socketAddr := strings.Split(addr, ":") - fcgiAddr = socketAddr[0] - fcgi, _ = NewClient("unix", socketAddr[1]) - } - resOut, resErr, err := 
fcgi.Request(map[string]string{ - "SCRIPT_NAME": "/status", - "SCRIPT_FILENAME": "status", - "REQUEST_METHOD": "GET", - }, "") - - if len(resErr) == 0 && err == nil { - importMetric(bytes.NewReader(resOut), acc, fcgiAddr) + socketPath = socketAddr[0] + statusPath = "status" } + if _, err := os.Stat(socketPath); os.IsNotExist(err) { + return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err) + } + fcgi, _ = NewClient("unix", socketPath) + } + return g.gatherFcgi(fcgi, statusPath, acc) +} + +// Gather stat using fcgi protocol +func (g *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc inputs.Accumulator) error { + fpmOutput, fpmErr, err := fcgi.Request(map[string]string{ + "SCRIPT_NAME": "/" + statusPath, + "SCRIPT_FILENAME": statusPath, + "REQUEST_METHOD": "GET", + "CONTENT_LENGTH": "0", + "SERVER_PROTOCOL": "HTTP/1.0", + "SERVER_SOFTWARE": "go / fcgiclient ", + "REMOTE_ADDR": "127.0.0.1", + }, "/"+statusPath) + + if len(fpmErr) == 0 && err == nil { + importMetric(bytes.NewReader(fpmOutput), acc) + return nil + } else { + return fmt.Errorf("Unable parse phpfpm status. 
Error: %v %v", string(fpmErr), err) + } +} + +// Gather stat using http protocol +func (g *phpfpm) gatherHttp(addr string, acc inputs.Accumulator) error { + u, err := url.Parse(addr) + if err != nil { + return fmt.Errorf("Unable parse server address '%s': %s", addr, err) } + req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, + u.Host, u.Path), nil) + res, err := g.client.Do(req) + if err != nil { + return fmt.Errorf("Unable to connect to phpfpm status page '%s': %v", + addr, err) + } + + if res.StatusCode != 200 { + return fmt.Errorf("Unable to get valid stat result from '%s': %v", + addr, err) + } + + importMetric(res.Body, acc) return nil } -// Import HTTP stat data into Telegraf system -func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, error) { +// Import stat data into Telegraf system +func importMetric(r io.Reader, acc inputs.Accumulator) (poolStat, error) { stats := make(poolStat) var currentPool string @@ -195,7 +226,6 @@ func importMetric(r io.Reader, acc inputs.Accumulator, host string) (poolStat, e // Finally, we push the pool metric for pool := range stats { tags := map[string]string{ - "url": host, "pool": pool, } fields := make(map[string]interface{}) diff --git a/plugins/inputs/phpfpm/phpfpm_fcgi.go b/plugins/inputs/phpfpm/phpfpm_fcgi.go index 65f4c789b..03aac7634 100644 --- a/plugins/inputs/phpfpm/phpfpm_fcgi.go +++ b/plugins/inputs/phpfpm/phpfpm_fcgi.go @@ -1,13 +1,14 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fcgi implements the FastCGI protocol. +// Currently only the responder role is supported. 
+// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 package phpfpm -// FastCGI client to request via socket - -// Copyright 2012 Junqing Tan and The Go Authors -// Use of this source code is governed by a BSD-style -// Part of source code is from Go fcgi package - -// Fix bug: Can't recive more than 1 record untill FCGI_END_REQUEST 2012-09-15 -// By: wofeiwo +// This file defines the raw protocol and some utilities used by the child and +// the host. import ( "bufio" @@ -15,70 +16,84 @@ import ( "encoding/binary" "errors" "io" + "sync" + "net" "strconv" - "sync" + + "strings" ) -const FCGI_LISTENSOCK_FILENO uint8 = 0 -const FCGI_HEADER_LEN uint8 = 8 -const VERSION_1 uint8 = 1 -const FCGI_NULL_REQUEST_ID uint8 = 0 -const FCGI_KEEP_CONN uint8 = 1 +// recType is a record type, as defined by +// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8 +type recType uint8 const ( - FCGI_BEGIN_REQUEST uint8 = iota + 1 - FCGI_ABORT_REQUEST - FCGI_END_REQUEST - FCGI_PARAMS - FCGI_STDIN - FCGI_STDOUT - FCGI_STDERR - FCGI_DATA - FCGI_GET_VALUES - FCGI_GET_VALUES_RESULT - FCGI_UNKNOWN_TYPE - FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE + typeBeginRequest recType = 1 + typeAbortRequest recType = 2 + typeEndRequest recType = 3 + typeParams recType = 4 + typeStdin recType = 5 + typeStdout recType = 6 + typeStderr recType = 7 + typeData recType = 8 + typeGetValues recType = 9 + typeGetValuesResult recType = 10 + typeUnknownType recType = 11 ) -const ( - FCGI_RESPONDER uint8 = iota + 1 - FCGI_AUTHORIZER - FCGI_FILTER -) +// keep the connection between web-server and responder open after request +const flagKeepConn = 1 const ( - FCGI_REQUEST_COMPLETE uint8 = iota - FCGI_CANT_MPX_CONN - FCGI_OVERLOADED - FCGI_UNKNOWN_ROLE -) - -const ( - FCGI_MAX_CONNS string = "MAX_CONNS" - FCGI_MAX_REQS string = "MAX_REQS" - FCGI_MPXS_CONNS string = "MPXS_CONNS" -) - -const ( - maxWrite = 6553500 // maximum record body + maxWrite = 65535 // maximum record body maxPad = 255 ) +const ( + 
roleResponder = iota + 1 // only Responders are implemented. + roleAuthorizer + roleFilter +) + +const ( + statusRequestComplete = iota + statusCantMultiplex + statusOverloaded + statusUnknownRole +) + +const headerLen = 8 + type header struct { Version uint8 - Type uint8 + Type recType Id uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 } +type beginRequest struct { + role uint16 + flags uint8 + reserved [5]uint8 +} + +func (br *beginRequest) read(content []byte) error { + if len(content) != 8 { + return errors.New("fcgi: invalid begin request record") + } + br.role = binary.BigEndian.Uint16(content) + br.flags = content[2] + return nil +} + // for padding so we don't have to allocate all the time // not synchronized because we don't care what the contents are var pad [maxPad]byte -func (h *header) init(recType uint8, reqId uint16, contentLength int) { +func (h *header) init(recType recType, reqId uint16, contentLength int) { h.Version = 1 h.Type = recType h.Id = reqId @@ -86,6 +101,26 @@ func (h *header) init(recType uint8, reqId uint16, contentLength int) { h.PaddingLength = uint8(-contentLength & 7) } +// conn sends records over rwc +type conn struct { + mutex sync.Mutex + rwc io.ReadWriteCloser + + // to avoid allocations + buf bytes.Buffer + h header +} + +func newConn(rwc io.ReadWriteCloser) *conn { + return &conn{rwc: rwc} +} + +func (c *conn) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.rwc.Close() +} + type record struct { h header buf [maxWrite + maxPad]byte @@ -109,69 +144,39 @@ func (r *record) content() []byte { return r.buf[:r.h.ContentLength] } -type FCGIClient struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - h header - buf bytes.Buffer - keepAlive bool -} - -func NewClient(h string, args ...interface{}) (fcgi *FCGIClient, err error) { - var conn net.Conn - if len(args) != 1 { - err = errors.New("fcgi: not enough params") - return - } - switch args[0].(type) { - case int: - addr := h + ":" + 
strconv.FormatInt(int64(args[0].(int)), 10) - conn, err = net.Dial("tcp", addr) - case string: - laddr := net.UnixAddr{Name: args[0].(string), Net: h} - conn, err = net.DialUnix(h, nil, &laddr) - default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") - } - fcgi = &FCGIClient{ - rwc: conn, - keepAlive: false, - } - return -} - -func (client *FCGIClient) writeRecord(recType uint8, reqId uint16, content []byte) (err error) { - client.mutex.Lock() - defer client.mutex.Unlock() - client.buf.Reset() - client.h.init(recType, reqId, len(content)) - if err := binary.Write(&client.buf, binary.BigEndian, client.h); err != nil { +// writeRecord writes and sends a single record. +func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { + c.mutex.Lock() + defer c.mutex.Unlock() + c.buf.Reset() + c.h.init(recType, reqId, len(b)) + if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } - if _, err := client.buf.Write(content); err != nil { + if _, err := c.buf.Write(b); err != nil { return err } - if _, err := client.buf.Write(pad[:client.h.PaddingLength]); err != nil { + if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { return err } - _, err = client.rwc.Write(client.buf.Bytes()) + _, err := c.rwc.Write(c.buf.Bytes()) return err } -func (client *FCGIClient) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { +func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} - return client.writeRecord(FCGI_BEGIN_REQUEST, reqId, b[:]) + return c.writeRecord(typeBeginRequest, reqId, b[:]) } -func (client *FCGIClient) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { +func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus - return 
client.writeRecord(FCGI_END_REQUEST, reqId, b) + return c.writeRecord(typeEndRequest, reqId, b) } -func (client *FCGIClient) writePairs(recType uint8, reqId uint16, pairs map[string]string) error { - w := newWriter(client, recType, reqId) +func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqId) b := make([]byte, 8) for k, v := range pairs { n := encodeSize(b, uint32(len(k))) @@ -238,7 +243,7 @@ func (w *bufWriter) Close() error { return w.closer.Close() } -func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter { +func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { s := &streamWriter{c: c, recType: recType, reqId: reqId} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} @@ -247,8 +252,8 @@ func newWriter(c *FCGIClient, recType uint8, reqId uint16) *bufWriter { // streamWriter abstracts out the separation of a stream into discrete records. // It only writes maxWrite bytes at a time. 
type streamWriter struct { - c *FCGIClient - recType uint8 + c *conn + recType recType reqId uint16 } @@ -273,22 +278,44 @@ func (w *streamWriter) Close() error { return w.c.writeRecord(w.recType, w.reqId, nil) } -func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout []byte, reterr []byte, err error) { +func NewClient(h string, args ...interface{}) (fcgi *conn, err error) { + var con net.Conn + if len(args) != 1 { + err = errors.New("fcgi: not enough params") + return + } + switch args[0].(type) { + case int: + addr := h + ":" + strconv.FormatInt(int64(args[0].(int)), 10) + con, err = net.Dial("tcp", addr) + case string: + laddr := net.UnixAddr{Name: args[0].(string), Net: h} + con, err = net.DialUnix(h, nil, &laddr) + default: + err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + } + fcgi = &conn{ + rwc: con, + } + return +} - var reqId uint16 = 1 +func (client *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { defer client.rwc.Close() + var reqId uint16 = 1 - err = client.writeBeginRequest(reqId, uint16(FCGI_RESPONDER), 0) + err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) if err != nil { return } - err = client.writePairs(FCGI_PARAMS, reqId, env) + + err = client.writePairs(typeParams, reqId, env) if err != nil { return } - if len(reqStr) > 0 { - err = client.writeRecord(FCGI_STDIN, reqId, []byte(reqStr)) - if err != nil { + + if len(requestData) > 0 { + if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { return } } @@ -297,23 +324,25 @@ func (client *FCGIClient) Request(env map[string]string, reqStr string) (retout var err1 error // recive untill EOF or FCGI_END_REQUEST +READ_LOOP: for { err1 = rec.read(client.rwc) - if err1 != nil { + if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != io.EOF { err = err1 } break } + switch { - case rec.h.Type == FCGI_STDOUT: + case 
rec.h.Type == typeStdout: retout = append(retout, rec.content()...) - case rec.h.Type == FCGI_STDERR: + case rec.h.Type == typeStderr: reterr = append(reterr, rec.content()...) - case rec.h.Type == FCGI_END_REQUEST: + case rec.h.Type == typeEndRequest: fallthrough default: - break + break READ_LOOP } } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 2f34372bf..58db0cf8b 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,24 +1,34 @@ package phpfpm import ( + "crypto/rand" + "encoding/binary" "fmt" + "net" + "net/http" + "net/http/fcgi" + "net/http/httptest" "testing" "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "net/http" - "net/http/httptest" ) -func TestPhpFpmGeneratesMetrics(t *testing.T) { - //We create a fake server to return test data - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, outputSample) - })) +type statServer struct{} + +// We create a fake server to return test data +func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + fmt.Fprint(w, outputSample) +} + +func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { + sv := statServer{} + ts := httptest.NewServer(sv) defer ts.Close() - //Now we tested again above server, with our authentication data r := &phpfpm{ Urls: []string{ts.URL}, } @@ -29,7 +39,134 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) { require.NoError(t, err) tags := map[string]string{ - "url": ts.Listener.Addr().String(), + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": 
int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { + // Let OS find an available port + tcp, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal("Cannot initalize test server") + } + defer tcp.Close() + + s := statServer{} + go fcgi.Serve(tcp, s) + + //Now we tested again above server + r := &phpfpm{ + Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, + } + + var acc testutil.Accumulator + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { + // Create a socket in /tmp because we always have write permission and if the + // removing of socket fail when system restart /tmp is clear so + // we don't have junk files around + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) + if err != nil { + t.Fatal("Cannot initalize server on port ") + } + + defer tcp.Close() + s := statServer{} + go fcgi.Serve(tcp, s) + + r := &phpfpm{ + Urls: []string{tcp.Addr().String()}, + } + + var acc testutil.Accumulator + + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "pool": "www", + } + + fields := map[string]interface{}{ + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": 
int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) +} + +func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { + // Create a socket in /tmp because we always have write permission. If the + // removing of socket fail we won't have junk files around. Cuz when system + // restart, it clears out /tmp + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) + if err != nil { + t.Fatal("Cannot initalize server on port ") + } + + defer tcp.Close() + s := statServer{} + go fcgi.Serve(tcp, s) + + r := &phpfpm{ + Urls: []string{tcp.Addr().String() + ":custom-status-path"}, + } + + var acc testutil.Accumulator + + err = r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ "pool": "www", } @@ -51,7 +188,7 @@ func TestPhpFpmGeneratesMetrics(t *testing.T) { //When not passing server config, we default to localhost //We just want to make sure we did request stat from localhost -func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { +func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { r := &phpfpm{} var acc testutil.Accumulator @@ -61,6 +198,31 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { assert.Contains(t, err.Error(), "127.0.0.1/status") } +func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { + r := &phpfpm{ + Urls: []string{"http://aninvalidone"}, + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`) +} + +func 
TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { + r := &phpfpm{ + Urls: []string{"/tmp/invalid.sock"}, + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error()) + +} + const outputSample = ` pool: www process manager: dynamic From bc71e956a57ff065a9f2efbd85f6aa9ac2a27f87 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 18 Jan 2016 11:05:14 -0700 Subject: [PATCH 084/103] changelog bugfix update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d65b8c67c..572447a33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin +- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! ## v0.10.0 [2016-01-12] From 551db206575171bbd091b36591fa65c5bc815432 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Sun, 17 Jan 2016 03:08:02 -0500 Subject: [PATCH 085/103] Add SIGHUP support to reload telegraf config closes #539 --- CHANGELOG.md | 1 + cmd/telegraf/telegraf.go | 238 +++++++++++++++++++++------------------ 2 files changed, 127 insertions(+), 112 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 572447a33..294d38c30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek! - [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert! - AMQP SSL support. Thanks @ekini! 
+- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index a2b5161be..47213e0e0 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -7,6 +7,7 @@ import ( "os" "os/signal" "strings" + "syscall" "github.com/influxdb/telegraf" "github.com/influxdb/telegraf/internal/config" @@ -82,143 +83,156 @@ Examples: ` func main() { - flag.Usage = usageExit - flag.Parse() + reload := make(chan bool, 1) + reload <- true + for <-reload { + reload <- false + flag.Usage = usageExit + flag.Parse() - if flag.NFlag() == 0 { - usageExit() - } + if flag.NFlag() == 0 { + usageExit() + } - var inputFilters []string - if *fInputFiltersLegacy != "" { - inputFilter := strings.TrimSpace(*fInputFiltersLegacy) - inputFilters = strings.Split(":"+inputFilter+":", ":") - } - if *fInputFilters != "" { - inputFilter := strings.TrimSpace(*fInputFilters) - inputFilters = strings.Split(":"+inputFilter+":", ":") - } + var inputFilters []string + if *fInputFiltersLegacy != "" { + inputFilter := strings.TrimSpace(*fInputFiltersLegacy) + inputFilters = strings.Split(":"+inputFilter+":", ":") + } + if *fInputFilters != "" { + inputFilter := strings.TrimSpace(*fInputFilters) + inputFilters = strings.Split(":"+inputFilter+":", ":") + } - var outputFilters []string - if *fOutputFiltersLegacy != "" { - outputFilter := strings.TrimSpace(*fOutputFiltersLegacy) - outputFilters = strings.Split(":"+outputFilter+":", ":") - } - if *fOutputFilters != "" { - outputFilter := strings.TrimSpace(*fOutputFilters) - outputFilters = strings.Split(":"+outputFilter+":", ":") - } + var outputFilters []string + if *fOutputFiltersLegacy != "" { + outputFilter := strings.TrimSpace(*fOutputFiltersLegacy) + outputFilters = 
strings.Split(":"+outputFilter+":", ":") + } + if *fOutputFilters != "" { + outputFilter := strings.TrimSpace(*fOutputFilters) + outputFilters = strings.Split(":"+outputFilter+":", ":") + } - if *fVersion { - v := fmt.Sprintf("Telegraf - Version %s", Version) - fmt.Println(v) - return - } + if *fVersion { + v := fmt.Sprintf("Telegraf - Version %s", Version) + fmt.Println(v) + return + } - if *fSampleConfig { - config.PrintSampleConfig(inputFilters, outputFilters) - return - } + if *fSampleConfig { + config.PrintSampleConfig(inputFilters, outputFilters) + return + } - if *fUsage != "" { - if err := config.PrintInputConfig(*fUsage); err != nil { - if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { - log.Fatalf("%s and %s", err, err2) + if *fUsage != "" { + if err := config.PrintInputConfig(*fUsage); err != nil { + if err2 := config.PrintOutputConfig(*fUsage); err2 != nil { + log.Fatalf("%s and %s", err, err2) + } + } + return + } + + var ( + c *config.Config + err error + ) + + if *fConfig != "" { + c = config.NewConfig() + c.OutputFilters = outputFilters + c.InputFilters = inputFilters + err = c.LoadConfig(*fConfig) + if err != nil { + log.Fatal(err) + } + } else { + fmt.Println("Usage: Telegraf") + flag.PrintDefaults() + return + } + + if *fConfigDirectoryLegacy != "" { + err = c.LoadDirectory(*fConfigDirectoryLegacy) + if err != nil { + log.Fatal(err) } } - return - } - var ( - c *config.Config - err error - ) + if *fConfigDirectory != "" { + err = c.LoadDirectory(*fConfigDirectory) + if err != nil { + log.Fatal(err) + } + } + if len(c.Outputs) == 0 { + log.Fatalf("Error: no outputs found, did you provide a valid config file?") + } + if len(c.Inputs) == 0 { + log.Fatalf("Error: no plugins found, did you provide a valid config file?") + } - if *fConfig != "" { - c = config.NewConfig() - c.OutputFilters = outputFilters - c.InputFilters = inputFilters - err = c.LoadConfig(*fConfig) + ag, err := telegraf.NewAgent(c) if err != nil { log.Fatal(err) } - } else { 
- fmt.Println("Usage: Telegraf") - flag.PrintDefaults() - return - } - if *fConfigDirectoryLegacy != "" { - err = c.LoadDirectory(*fConfigDirectoryLegacy) + if *fDebug { + ag.Config.Agent.Debug = true + } + + if *fQuiet { + ag.Config.Agent.Quiet = true + } + + if *fTest { + err = ag.Test() + if err != nil { + log.Fatal(err) + } + return + } + + err = ag.Connect() if err != nil { log.Fatal(err) } - } - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) - if err != nil { - log.Fatal(err) - } - } - if len(c.Outputs) == 0 { - log.Fatalf("Error: no outputs found, did you provide a valid config file?") - } - if len(c.Inputs) == 0 { - log.Fatalf("Error: no plugins found, did you provide a valid config file?") - } + shutdown := make(chan struct{}) + signals := make(chan os.Signal) + signal.Notify(signals, os.Interrupt, syscall.SIGHUP) + go func() { + sig := <-signals + if sig == os.Interrupt { + close(shutdown) + } + if sig == syscall.SIGHUP { + log.Printf("Reloading Telegraf config\n") + <-reload + reload <- true + close(shutdown) + } + }() - ag, err := telegraf.NewAgent(c) - if err != nil { - log.Fatal(err) - } + log.Printf("Starting Telegraf (version %s)\n", Version) + log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " ")) + log.Printf("Tags enabled: %s", c.ListTags()) - if *fDebug { - ag.Config.Agent.Debug = true - } + if *fPidfile != "" { + f, err := os.Create(*fPidfile) + if err != nil { + log.Fatalf("Unable to create pidfile: %s", err) + } - if *fQuiet { - ag.Config.Agent.Quiet = true - } + fmt.Fprintf(f, "%d\n", os.Getpid()) - if *fTest { - err = ag.Test() - if err != nil { - log.Fatal(err) - } - return - } - - err = ag.Connect() - if err != nil { - log.Fatal(err) - } - - shutdown := make(chan struct{}) - signals := make(chan os.Signal) - signal.Notify(signals, os.Interrupt) - go func() { - <-signals - close(shutdown) - }() - - log.Printf("Starting Telegraf 
(version %s)\n", Version) - log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " ")) - log.Printf("Tags enabled: %s", c.ListTags()) - - if *fPidfile != "" { - f, err := os.Create(*fPidfile) - if err != nil { - log.Fatalf("Unable to create pidfile: %s", err) + f.Close() } - fmt.Fprintf(f, "%d\n", os.Getpid()) - - f.Close() + ag.Run(shutdown) } - - ag.Run(shutdown) } func usageExit() { From 1388b1b58b0b486963ddfea3b9c7ceb267f14dec Mon Sep 17 00:00:00 2001 From: Vinh Date: Thu, 14 Jan 2016 17:09:38 -0800 Subject: [PATCH 086/103] Add phusion Passenger plugin Gather metric by parsing XMLoutput of `passenger-status` utility. More information of this utility: https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html closes #522 --- CHANGELOG.md | 1 + Godeps | 1 + README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/passenger/README.md | 138 ++++++++++ plugins/inputs/passenger/passenger.go | 250 +++++++++++++++++ plugins/inputs/passenger/passenger_test.go | 301 +++++++++++++++++++++ 7 files changed, 693 insertions(+) create mode 100644 plugins/inputs/passenger/README.md create mode 100644 plugins/inputs/passenger/passenger.go create mode 100644 plugins/inputs/passenger/passenger_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 294d38c30..82a447e0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ - [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert! - AMQP SSL support. Thanks @ekini! - [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert! +- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/Godeps b/Godeps index 1b427674a..0b8b9ceb1 100644 --- a/Godeps +++ b/Godeps @@ -46,6 +46,7 @@ github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 golang.org/x/crypto 3760e016850398b85094c4c99e955b8c3dea5711 golang.org/x/net 99ca920b6037ef77af8a11297150f7f0d8f4ef80 +golang.org/x/text cf4986612c83df6c55578ba198316d1684a9a287 gopkg.in/dancannon/gorethink.v1 e2cef022d0495329dfb0635991de76efcab5cf50 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49 diff --git a/README.md b/README.md index 539895899..840fb5e72 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,7 @@ Currently implemented sources: * nginx * nsq * phpfpm +* phusion passenger * ping * postgresql * procstat diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index b4c8553c3..c9e8ea4c8 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -20,6 +20,7 @@ import ( _ "github.com/influxdb/telegraf/plugins/inputs/mysql" _ "github.com/influxdb/telegraf/plugins/inputs/nginx" _ "github.com/influxdb/telegraf/plugins/inputs/nsq" + _ "github.com/influxdb/telegraf/plugins/inputs/passenger" _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdb/telegraf/plugins/inputs/ping" _ "github.com/influxdb/telegraf/plugins/inputs/postgresql" diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md new file mode 100644 index 000000000..64e39729b --- /dev/null +++ b/plugins/inputs/passenger/README.md @@ -0,0 +1,138 @@ +# Telegraf plugin: passenger + +Get phusion passenger stat using their command line utility +`passenger-status` + +# Measurements + +Meta: + +- tags: + + * name + * passenger_version + * pid + * code_revision + +Measurement names: + +- passenger: + + * Tags: `passenger_version` + * Fields: + + - process_count + - max + - capacity_used + - get_wait_list_size + +- 
passenger_supergroup: + + * Tags: `name` + * Fields: + + - get_wait_list_size + - capacity_used + +- passenger_group: + + * Tags: + + - name + - app_root + - app_type + + * Fields: + + - get_wait_list_size + - capacity_used + - processes_being_spawned + +- passenger_process: + + * Tags: + + - group_name + - app_root + - supergroup_name + - pid + - code_revision + - life_status + - process_group_id + + * Field: + + - concurrency + - sessions + - busyness + - processed + - spawner_creation_time + - spawn_start_time + - spawn_end_time + - last_used + - uptime + - cpu + - rss + - pss + - private_dirty + - swap + - real_memory + - vmsize + +# Example output + +Using this configuration: + +``` +[[inputs.passenger]] + # Path of passenger-status. + # + # Plugin gather metric via parsing XML output of passenger-status + # More information about the tool: + # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + # + # + # If no path is specified, then the plugin simply execute passenger-status + # hopefully it can be found in your PATH + command = "passenger-status -v --show=xml" +``` + +When run with: + +``` +./telegraf -config telegraf.conf -test -input-filter passenger +``` + +It produces: + +``` +> passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257 +> passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977 +> passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021 +> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11553,process_group_id=13608,supergroup_name=/var/app/current/public 
busyness=0i,concurrency=1i,cpu=58i,last_used=1452747071764940i,private_dirty=314900i,processed=951i,pss=319391i,real_memory=314900i,rss=418548i,sessions=0i,spawn_end_time=1452746845013365i,spawn_start_time=1452746844946982i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563580i 1452984112799571490 +> passenger_process,app_root=/var/app/current,code_revision=899ac7f,group_name=/var/app/current/public,life_status=ALIVE,pid=11563,process_group_id=13608,supergroup_name=/var/app/current/public busyness=2147483647i,concurrency=1i,cpu=47i,last_used=1452747071709179i,private_dirty=309240i,processed=756i,pss=314036i,real_memory=309240i,rss=418296i,sessions=1i,spawn_end_time=1452746845172460i,spawn_start_time=1452746845136882i,spawner_creation_time=1452746835922747i,swap=0i,uptime=226i,vmsize=1563608i 1452984112799638581 +``` + +# Note + +You have to ensure that you can run the `passenger-status` command under +telegraf user. Depend on how you install and configure passenger, this +maybe an issue for you. If you are using passenger standlone, or compile +yourself, it is straight forward. However, if you are using gem and +`rvm`, it maybe harder to get this right. + +Such as with `rvm`, you can use this command: + +``` +~/.rvm/bin/rvm default do passenger-status -v --show=xml +``` + +You can use `&` and `;` in the shell command to run comlicated shell command +in order to get the passenger-status such as load the rvm shell, source the +path +``` +command = "source .rvm/scripts/rvm && passenger-status -v --show=xml" +``` + +Anyway, just ensure that you can run the command under `telegraf` user, and it +has to produce XML output. 
diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go new file mode 100644 index 000000000..2d98f8c58 --- /dev/null +++ b/plugins/inputs/passenger/passenger.go @@ -0,0 +1,250 @@ +package passenger + +import ( + "bytes" + "encoding/xml" + "fmt" + "os/exec" + "strconv" + "strings" + + "github.com/influxdb/telegraf/plugins/inputs" + "golang.org/x/net/html/charset" +) + +type passenger struct { + Command string +} + +func (p *passenger) parseCommand() (string, []string) { + var arguments []string + if !strings.Contains(p.Command, " ") { + return p.Command, arguments + } + + arguments = strings.Split(p.Command, " ") + if len(arguments) == 1 { + return arguments[0], arguments[1:] + } + + return arguments[0], arguments[1:] +} + +type info struct { + Passenger_version string `xml:"passenger_version"` + Process_count int `xml:"process_count"` + Capacity_used int `xml:"capacity_used"` + Get_wait_list_size int `xml:"get_wait_list_size"` + Max int `xml:"max"` + Supergroups struct { + Supergroup []struct { + Name string `xml:"name"` + Get_wait_list_size int `xml:"get_wait_list_size"` + Capacity_used int `xml:"capacity_used"` + Group []struct { + Name string `xml:"name"` + AppRoot string `xml:"app_root"` + AppType string `xml:"app_type"` + Enabled_process_count int `xml:"enabled_process_count"` + Disabling_process_count int `xml:"disabling_process_count"` + Disabled_process_count int `xml:"disabled_process_count"` + Capacity_used int `xml:"capacity_used"` + Get_wait_list_size int `xml:"get_wait_list_size"` + Processes_being_spawned int `xml:"processes_being_spawned"` + Processes struct { + Process []*process `xml:"process"` + } `xml:"processes"` + } `xml:"group"` + } `xml:"supergroup"` + } `xml:"supergroups"` +} + +type process struct { + Pid int `xml:"pid"` + Concurrency int `xml:"concurrency"` + Sessions int `xml:"sessions"` + Busyness int `xml:"busyness"` + Processed int `xml:"processed"` + Spawner_creation_time int64 
`xml:"spawner_creation_time"` + Spawn_start_time int64 `xml:"spawn_start_time"` + Spawn_end_time int64 `xml:"spawn_end_time"` + Last_used int64 `xml:"last_used"` + Uptime string `xml:"uptime"` + Code_revision string `xml:"code_revision"` + Life_status string `xml:"life_status"` + Enabled string `xml:"enabled"` + Has_metrics bool `xml:"has_metrics"` + Cpu int64 `xml:"cpu"` + Rss int64 `xml:"rss"` + Pss int64 `xml:"pss"` + Private_dirty int64 `xml:"private_dirty"` + Swap int64 `xml:"swap"` + Real_memory int64 `xml:"real_memory"` + Vmsize int64 `xml:"vmsize"` + Process_group_id string `xml:"process_group_id"` +} + +func (p *process) getUptime() int64 { + if p.Uptime == "" { + return 0 + } + + timeSlice := strings.Split(p.Uptime, " ") + var uptime int64 + uptime = 0 + for _, v := range timeSlice { + switch { + case strings.HasSuffix(v, "d"): + iValue := strings.TrimSuffix(v, "d") + value, err := strconv.ParseInt(iValue, 10, 64) + if err == nil { + uptime += value * (24 * 60 * 60) + } + case strings.HasSuffix(v, "h"): + iValue := strings.TrimSuffix(v, "y") + value, err := strconv.ParseInt(iValue, 10, 64) + if err == nil { + uptime += value * (60 * 60) + } + case strings.HasSuffix(v, "m"): + iValue := strings.TrimSuffix(v, "m") + value, err := strconv.ParseInt(iValue, 10, 64) + if err == nil { + uptime += value * 60 + } + case strings.HasSuffix(v, "s"): + iValue := strings.TrimSuffix(v, "s") + value, err := strconv.ParseInt(iValue, 10, 64) + if err == nil { + uptime += value + } + } + } + + return uptime +} + +var sampleConfig = ` + # Path of passenger-status. 
+ # + # Plugin gather metric via parsing XML output of passenger-status + # More information about the tool: + # https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + # + # + # If no path is specified, then the plugin simply execute passenger-status + # hopefully it can be found in your PATH + command = "passenger-status -v --show=xml" +` + +func (r *passenger) SampleConfig() string { + return sampleConfig +} + +func (r *passenger) Description() string { + return "Read metrics of passenger using passenger-status" +} + +func (g *passenger) Gather(acc inputs.Accumulator) error { + if g.Command == "" { + g.Command = "passenger-status -v --show=xml" + } + + cmd, args := g.parseCommand() + out, err := exec.Command(cmd, args...).Output() + + if err != nil { + return err + } + + if err = importMetric(out, acc); err != nil { + return err + } + + return nil +} + +func importMetric(stat []byte, acc inputs.Accumulator) error { + var p info + + decoder := xml.NewDecoder(bytes.NewReader(stat)) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&p); err != nil { + return fmt.Errorf("Cannot parse input with error: %v\n", err) + } + + tags := map[string]string{ + "passenger_version": p.Passenger_version, + } + fields := map[string]interface{}{ + "process_count": p.Process_count, + "max": p.Max, + "capacity_used": p.Capacity_used, + "get_wait_list_size": p.Get_wait_list_size, + } + acc.AddFields("passenger", fields, tags) + + for _, sg := range p.Supergroups.Supergroup { + tags := map[string]string{ + "name": sg.Name, + } + fields := map[string]interface{}{ + "get_wait_list_size": sg.Get_wait_list_size, + "capacity_used": sg.Capacity_used, + } + acc.AddFields("passenger_supergroup", fields, tags) + + for _, group := range sg.Group { + tags := map[string]string{ + "name": group.Name, + "app_root": group.AppRoot, + "app_type": group.AppType, + } + fields := map[string]interface{}{ + "get_wait_list_size": group.Get_wait_list_size, 
+ "capacity_used": group.Capacity_used, + "processes_being_spawned": group.Processes_being_spawned, + } + acc.AddFields("passenger_group", fields, tags) + + for _, process := range group.Processes.Process { + tags := map[string]string{ + "group_name": group.Name, + "app_root": group.AppRoot, + "supergroup_name": sg.Name, + "pid": fmt.Sprintf("%d", process.Pid), + "code_revision": process.Code_revision, + "life_status": process.Life_status, + "process_group_id": process.Process_group_id, + } + fields := map[string]interface{}{ + "concurrency": process.Concurrency, + "sessions": process.Sessions, + "busyness": process.Busyness, + "processed": process.Processed, + "spawner_creation_time": process.Spawner_creation_time, + "spawn_start_time": process.Spawn_start_time, + "spawn_end_time": process.Spawn_end_time, + "last_used": process.Last_used, + "uptime": process.getUptime(), + "cpu": process.Cpu, + "rss": process.Rss, + "pss": process.Pss, + "private_dirty": process.Private_dirty, + "swap": process.Swap, + "real_memory": process.Real_memory, + "vmsize": process.Vmsize, + } + acc.AddFields("passenger_process", fields, tags) + } + } + } + + return nil +} + +func init() { + inputs.Add("passenger", func() inputs.Input { + return &passenger{} + }) +} diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go new file mode 100644 index 000000000..3440c5337 --- /dev/null +++ b/plugins/inputs/passenger/passenger_test.go @@ -0,0 +1,301 @@ +package passenger + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func fakePassengerStatus(stat string) { + content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) + ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700) +} + +func teardown() { + os.Remove("/tmp/passenger-status") +} + +func Test_Invalid_Passenger_Status_Cli(t *testing.T) { + r := 
&passenger{ + Command: "an-invalid-command passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`) +} + +func Test_Invalid_Xml(t *testing.T) { + fakePassengerStatus("invalid xml") + defer teardown() + + r := &passenger{ + Command: "/tmp/passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n") +} + +// We test this by ensure that the error message match the path of default cli +func Test_Default_Config_Load_Default_Command(t *testing.T) { + fakePassengerStatus("invalid xml") + defer teardown() + + r := &passenger{} + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.Error(t, err) + assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH") +} + +func TestPassengerGenerateMetric(t *testing.T) { + fakePassengerStatus(sampleStat) + defer teardown() + + //Now we tested again above server, with our authentication data + r := &passenger{ + Command: "/tmp/passenger-status", + } + + var acc testutil.Accumulator + + err := r.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "passenger_version": "5.0.17", + } + fields := map[string]interface{}{ + "process_count": 23, + "max": 23, + "capacity_used": 23, + "get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger", fields, tags) + + tags = map[string]string{ + "name": "/var/app/current/public", + "app_root": "/var/app/current", + "app_type": "rack", + } + fields = map[string]interface{}{ + "processes_being_spawned": 2, + "capacity_used": 23, + "get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger_group", fields, tags) + + tags = map[string]string{ + "name": "/var/app/current/public", + } + + fields = map[string]interface{}{ + "capacity_used": 23, + 
"get_wait_list_size": 3, + } + acc.AssertContainsTaggedFields(t, "passenger_supergroup", fields, tags) + + tags = map[string]string{ + "app_root": "/var/app/current", + "group_name": "/var/app/current/public", + "supergroup_name": "/var/app/current/public", + "pid": "11553", + "code_revision": "899ac7f", + "life_status": "ALIVE", + "process_group_id": "13608", + } + fields = map[string]interface{}{ + "concurrency": 1, + "sessions": 0, + "busyness": 0, + "processed": 951, + "spawner_creation_time": int64(1452746835922747), + "spawn_start_time": int64(1452746844946982), + "spawn_end_time": int64(1452746845013365), + "last_used": int64(1452747071764940), + "uptime": int64(226), // in seconds of 3m 46s + "cpu": int64(58), + "rss": int64(418548), + "pss": int64(319391), + "private_dirty": int64(314900), + "swap": int64(0), + "real_memory": int64(314900), + "vmsize": int64(1563580), + } + acc.AssertContainsTaggedFields(t, "passenger_process", fields, tags) +} + +var sampleStat = ` + + + + 5.0.17 + 1 + 23 + 23 + 23 + 3 + + + + /var/app/current/public + READY + 3 + 23 + foo + + /var/app/current/public + /var/app/current/public + /var/app/current + rack + production + QQUrbCVYxbJYpfgyDOwJ + 23 + 0 + 0 + 23 + 3 + 0 + 2 + foo + foo + ALIVE + axcoto + 1001 + axcoto + 1001 + + /var/app/current + /var/app/current/public + rack + /var/app/.rvm/gems/ruby-2.2.0-p645/gems/passenger-5.0.17/helper-scripts/rack-loader.rb + config.ru + Passenger RubyApp + 3 + 90000 + production + / + smart + nobody + nogroup + /var/app/.rvm/gems/ruby-2.2.0-p645/wrappers/ruby + python + node + unix:/tmp/passenger.eKFdvdC/agents.s/ust_router + logging + foo + false + false + foo + 22 + 0 + 300 + 1 + + + + 11553 + 378579907 + 17173df-PoNT3J9HCf + 1 + 0 + 0 + 951 + 1452746835922747 + 1452746844946982 + 1452746845013365 + 1452747071764940 + 0s ago + 3m 46s + 899ac7f + ALIVE + ENABLED + true + 58 + 418548 + 319391 + 314900 + 0 + 314900 + 1563580 + 13608 + Passenger RubyApp: /var/app/current/public + + + main +
unix:/tmp/passenger.eKFdvdC/apps.s/ruby.UWF6zkRJ71aoMXPxpknpWVfC1POFqgWZzbEsdz5v0G46cSSMxJ3GHLFhJaUrK2I
+ session + 1 + 0 +
+ + http +
tcp://127.0.0.1:49888
+ http + 1 + 0 +
+
+
+ + 11563 + 1549681201 + 17173df-pX5iJOipd8 + 1 + 1 + 2147483647 + 756 + 1452746835922747 + 1452746845136882 + 1452746845172460 + 1452747071709179 + 0s ago + 3m 46s + 899ac7f + ALIVE + ENABLED + true + 47 + 418296 + 314036 + 309240 + 0 + 309240 + 1563608 + 13608 + Passenger RubyApp: /var/app/current/public + + + main +
unix:/tmp/passenger.eKFdvdC/apps.s/ruby.PVCh7TmvCi9knqhba2vG5qXrlHGEIwhGrxnUvRbIAD6SPz9m0G7YlJ8HEsREHY3
+ session + 1 + 1 +
+ + http +
tcp://127.0.0.1:52783
+ http + 1 + 0 +
+
+
+
+
+
+
+
` From c313af1b24dd9e32172fe6a100af75bbfe04792b Mon Sep 17 00:00:00 2001 From: Hannu Valtonen Date: Mon, 11 Jan 2016 14:20:51 +0200 Subject: [PATCH 087/103] kafka: Add support for using TLS authentication for the kafka output With the advent of Kafka 0.9.0+ it is possible to set up TLS client certificate based authentication to limit access to Kafka. Four new configuration variables are specified for setting up the authentication. If they're not set the behavior stays the same as before the change. closes #541 --- CHANGELOG.md | 1 + plugins/outputs/kafka/kafka.go | 67 ++++++++++++++++++++++++++++++++-- 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82a447e0f..080ce5870 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - AMQP SSL support. Thanks @ekini! - [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert! - [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain! +- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod! ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 8e53cc511..55ef35fb4 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -1,12 +1,14 @@ package kafka import ( + "crypto/tls" + "crypto/x509" "errors" "fmt" - "github.com/Shopify/sarama" "github.com/influxdb/influxdb/client/v2" "github.com/influxdb/telegraf/plugins/outputs" + "io/ioutil" ) type Kafka struct { @@ -16,8 +18,17 @@ type Kafka struct { Topic string // Routing Key Tag RoutingTag string `toml:"routing_tag"` + // TLS client certificate + Certificate string + // TLS client key + Key string + // TLS certificate authority + CA string + // Verfiy SSL certificate chain + VerifySsl bool - producer sarama.SyncProducer + tlsConfig tls.Config + producer sarama.SyncProducer } var sampleConfig = ` @@ -28,10 +39,60 @@ var sampleConfig = ` # Telegraf tag to use as a routing key # ie, if this tag exists, it's value will be used as the routing key routing_tag = "host" + + # Optional TLS configuration: + # Client certificate + certificate = "" + # Client key + key = "" + # Certificate authority file + ca = "" + # Verify SSL certificate chain + verify_ssl = false ` +func createTlsConfiguration(k *Kafka) (t *tls.Config, err error) { + if k.Certificate != "" && k.Key != "" && k.CA != "" { + cert, err := tls.LoadX509KeyPair(k.Certificate, k.Key) + if err != nil { + return nil, errors.New(fmt.Sprintf("Cout not load Kafka TLS client key/certificate: %s", + err)) + } + + caCert, err := ioutil.ReadFile(k.CA) + if err != nil { + return nil, errors.New(fmt.Sprintf("Cout not load Kafka TLS CA: %s", + err)) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + t = &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: k.VerifySsl, + } + } + // will be nil by default if nothing is provided + return t, nil +} + func (k *Kafka) Connect() error { - producer, err := sarama.NewSyncProducer(k.Brokers, 
nil) + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message + config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message + tlsConfig, err := createTlsConfiguration(k) + if err != nil { + return err + } + + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + producer, err := sarama.NewSyncProducer(k.Brokers, config) if err != nil { return err } From 7efe1086862ca971d5ea7f228e7f4b34f580edb4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 19 Jan 2016 09:37:17 -0700 Subject: [PATCH 088/103] Update Godeps file --- Godeps | 16 ++++++++-------- README.md | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Godeps b/Godeps index 0b8b9ceb1..f719539f5 100644 --- a/Godeps +++ b/Godeps @@ -1,9 +1,9 @@ git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034 github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef -github.com/Sirupsen/logrus 446d1c146faa8ed3f4218f056fcd165f6bcfda81 +github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252 github.com/amir/raidman 6a8e089bbe32e6b907feae5ba688841974b3c339 github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757 -github.com/aws/aws-sdk-go c4c1a1a2a076858fe18b2be674d833c796c45b09 +github.com/aws/aws-sdk-go 3ad0b07b44c22c21c734d1094981540b7a11e942 github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d github.com/boltdb/bolt 6465994716bf6400605746e79224cf1e7ed68725 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 @@ -11,18 +11,18 @@ github.com/dancannon/gorethink ff457cac6a529d9749d841a733d76e8305cba3c8 github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 -github.com/fsouza/go-dockerclient 
2fb7694010aa553998ed513dc8805ab00708077a +github.com/fsouza/go-dockerclient 6fb38e6bb3d544d7eb5b55fd396cd4e6850802d8 github.com/go-ini/ini afbd495e5aaea13597b5e14fe514ddeaa4d76fc3 -github.com/go-sql-driver/mysql 6fd058ce0d6b7ee43174e80d5a3e7f483c4dfbe5 +github.com/go-sql-driver/mysql 72ea5d0b32a04c67710bf63e97095d82aea5f352 github.com/gogo/protobuf c57e439bad574c2e0877ff18d514badcfced004d github.com/golang/protobuf 2402d76f3d41f928c7902a765dfc872356dd3aad github.com/golang/snappy 723cc1e459b8eea2dea4583200fd60757d40097a github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64 github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 -github.com/hashicorp/raft d136cd15dfb7876fd7c89cad1995bc4f19ceb294 +github.com/hashicorp/raft b95f335efee1992886864389183ebda0c0a5d0f6 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee -github.com/influxdb/influxdb db84a6ed76353905432ff8bd91527c68b3ea1be6 +github.com/influxdb/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f @@ -45,9 +45,9 @@ github.com/stretchr/testify f390dcf405f7b83c997eac1b06768bb9f44dec18 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 golang.org/x/crypto 3760e016850398b85094c4c99e955b8c3dea5711 -golang.org/x/net 99ca920b6037ef77af8a11297150f7f0d8f4ef80 +golang.org/x/net 72aa00c6241a8013dc9b040abb45f57edbe73945 golang.org/x/text cf4986612c83df6c55578ba198316d1684a9a287 gopkg.in/dancannon/gorethink.v1 e2cef022d0495329dfb0635991de76efcab5cf50 gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 -gopkg.in/mgo.v2 e30de8ac9ae3b30df7065f766c71f88bba7d4e49 +gopkg.in/mgo.v2 
03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64 gopkg.in/yaml.v2 f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 diff --git a/README.md b/README.md index 840fb5e72..80c739d03 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ brew install telegraf Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm), which gets installed via the Makefile -if you don't have it already. You also must build with golang version 1.4+. +if you don't have it already. You also must build with golang version 1.5+. 1. [Install Go](https://golang.org/doc/install) 2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) From 6a50fceea40a71b1b283dec344191ef4b325e357 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Sun, 17 Jan 2016 17:32:24 -0500 Subject: [PATCH 089/103] Replace plugins by inputs in some strings closes #542 --- cmd/telegraf/telegraf.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 47213e0e0..554569012 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -28,14 +28,14 @@ var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") var fInputFilters = flag.String("input-filter", "", - "filter the plugins to enable, separator is :") + "filter the inputs to enable, separator is :") var fOutputFilters = flag.String("output-filter", "", "filter the outputs to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf -usage mysql'") var fInputFiltersLegacy = flag.String("filter", "", - "filter the plugins to enable, separator is :") + "filter the inputs to enable, separator is :") var fOutputFiltersLegacy = flag.String("outputfilter", "", "filter the outputs to enable, separator is :") var fConfigDirectoryLegacy = flag.String("configdirectory", "", @@ -170,7 +170,7 @@ func main() { log.Fatalf("Error: no outputs found, 
did you provide a valid config file?") } if len(c.Inputs) == 0 { - log.Fatalf("Error: no plugins found, did you provide a valid config file?") + log.Fatalf("Error: no inputs found, did you provide a valid config file?") } ag, err := telegraf.NewAgent(c) @@ -217,7 +217,7 @@ func main() { log.Printf("Starting Telegraf (version %s)\n", Version) log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("Loaded plugins: %s", strings.Join(c.InputNames(), " ")) + log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " ")) log.Printf("Tags enabled: %s", c.ListTags()) if *fPidfile != "" { From 839651fadb7761f851bc439d59d72aff396ac194 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 19 Jan 2016 11:01:53 -0700 Subject: [PATCH 090/103] Change default statsd packet size to 1500, make configurable Also modifying the internal UDP listener/parser code to make it able to handle higher load. The udp listener will no longer do any parsing or string conversion. It will simply read UDP packets as bytes and put them into a channel. The parser thread will now deal with splitting the UDP metrics into separated strings. This could probably be made even better by leaving everything as byte arrays. fixes #543 --- plugins/inputs/statsd/statsd.go | 55 +++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index a4b70ffe3..d9f597bcf 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -15,6 +15,8 @@ import ( "github.com/influxdb/telegraf/plugins/inputs" ) +const UDP_PACKET_SIZE int = 1500 + var dropwarn = "ERROR: Message queue full. Discarding line [%s] " + "You may want to increase allowed_pending_messages in the config\n" @@ -37,10 +39,14 @@ type Statsd struct { DeleteTimings bool ConvertNames bool + // UDPPacketSize is the size of the read packets for the server listening + // for statsd UDP packets. 
This will default to 1500 bytes. + UDPPacketSize int `toml:"udp_packet_size"` + sync.Mutex - // Channel for all incoming statsd messages - in chan string + // Channel for all incoming statsd packets + in chan []byte done chan struct{} // Cache gauges, counters & sets so they can be aggregated as they arrive @@ -58,13 +64,14 @@ func NewStatsd() *Statsd { // Make data structures s.done = make(chan struct{}) - s.in = make(chan string, s.AllowedPendingMessages) + s.in = make(chan []byte, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) s.ConvertNames = true + s.UDPPacketSize = UDP_PACKET_SIZE return &s } @@ -139,6 +146,10 @@ const sampleConfig = ` # calculation of percentiles. Raising this limit increases the accuracy # of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 + + # UDP packet size for the server to listen for. This will depend on the size + # of the packets that the client is sending, which is usually 1500 bytes. 
+ udp_packet_size = 1500 ` func (_ *Statsd) SampleConfig() string { @@ -191,7 +202,7 @@ func (s *Statsd) Gather(acc inputs.Accumulator) error { func (s *Statsd) Start() error { // Make data structures s.done = make(chan struct{}) - s.in = make(chan string, s.AllowedPendingMessages) + s.in = make(chan []byte, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) @@ -220,36 +231,37 @@ func (s *Statsd) udpListen() error { case <-s.done: return nil default: - buf := make([]byte, 1024) + buf := make([]byte, s.UDPPacketSize) n, _, err := listener.ReadFromUDP(buf) if err != nil { log.Printf("ERROR: %s\n", err.Error()) } - lines := strings.Split(string(buf[:n]), "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line != "" { - select { - case s.in <- line: - default: - log.Printf(dropwarn, line) - } - } + select { + case s.in <- buf[:n]: + default: + log.Printf(dropwarn, string(buf[:n])) } } } } -// parser monitors the s.in channel, if there is a line ready, it parses the -// statsd string into a usable metric struct and aggregates the value +// parser monitors the s.in channel, if there is a packet ready, it parses the +// packet into statsd strings and then calls parseStatsdLine, which parses a +// single statsd metric into a struct. 
func (s *Statsd) parser() error { for { select { case <-s.done: return nil - case line := <-s.in: - s.parseStatsdLine(line) + case packet := <-s.in: + lines := strings.Split(string(packet), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + s.parseStatsdLine(line) + } + } } } } @@ -499,6 +511,9 @@ func (s *Statsd) Stop() { func init() { inputs.Add("statsd", func() inputs.Input { - return &Statsd{ConvertNames: true} + return &Statsd{ + ConvertNames: true, + UDPPacketSize: UDP_PACKET_SIZE, + } }) } From f3b553712a79455817304166e6791f8e913d4984 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 19 Jan 2016 11:20:56 -0700 Subject: [PATCH 091/103] Changelog update --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 080ce5870..77f5a082d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,11 +12,13 @@ - [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert! - [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain! - [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod! +- [#551](https://github.com/influxdb/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! +- [#543](https://github.com/influxdb/telegraf/issues/543): Statsd Packet size sometimes truncated. 
## v0.10.0 [2016-01-12] From d3a5cca1bc18f315dacbd9941f1e4556fde86538 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 19 Jan 2016 13:00:36 -0700 Subject: [PATCH 092/103] Collection interval random jittering closes #460 --- CHANGELOG.md | 1 + agent.go | 19 +++++++++++++++---- internal/config/config.go | 16 +++++++++++++++- 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77f5a082d..9319c775e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain! - [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod! - [#551](https://github.com/influxdb/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. +- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. ### Bugfixes - [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/agent.go b/agent.go index 25fd46462..25b157c54 100644 --- a/agent.go +++ b/agent.go @@ -1,10 +1,11 @@ package telegraf import ( - "crypto/rand" + cryptorand "crypto/rand" "fmt" "log" "math/big" + "math/rand" "os" "sync" "time" @@ -92,6 +93,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { start := time.Now() counter := 0 + jitter := a.Config.Agent.CollectionJitter.Duration.Nanoseconds() for _, input := range a.Config.Inputs { if input.Config.Interval != 0 { continue @@ -104,9 +106,19 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error { acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - // acc.SetPrefix(input.Name + "_") acc.SetDefaultTags(a.Config.Tags) + if jitter != 0 { + nanoSleep := rand.Int63n(jitter) + d, err := time.ParseDuration(fmt.Sprintf("%dns", nanoSleep)) + if err != nil { + log.Printf("Jittering collection interval failed for plugin %s", + input.Name) + } else { + time.Sleep(d) + } + } + if err := input.Input.Gather(acc); err != nil { log.Printf("Error in input [%s]: %s", input.Name, err) } @@ -143,7 +155,6 @@ func (a *Agent) gatherSeparate( acc := NewAccumulator(input.Config, pointChan) acc.SetDebug(a.Config.Agent.Debug) - // acc.SetPrefix(input.Name + "_") acc.SetDefaultTags(a.Config.Tags) if err := input.Input.Gather(acc); err != nil { @@ -315,7 +326,7 @@ func jitterInterval(ininterval, injitter time.Duration) time.Duration { outinterval := ininterval if injitter.Nanoseconds() != 0 { maxjitter := big.NewInt(injitter.Nanoseconds()) - if j, err := rand.Int(rand.Reader, maxjitter); err == nil { + if j, err := cryptorand.Int(cryptorand.Reader, maxjitter); err == nil { jitter = j.Int64() } outinterval = time.Duration(jitter + ininterval.Nanoseconds()) diff --git a/internal/config/config.go b/internal/config/config.go index ca4972b69..e4d7fbc9d 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -61,13 +61,22 @@ type AgentConfig struct { // ie, 
if Interval=10s then always collect on :00, :10, :20, etc. RoundInterval bool + // CollectionJitter is used to jitter the collection by a random amount. + // Each plugin will sleep for a random time within jitter before collecting. + // This can be used to avoid many plugins querying things like sysfs at the + // same time, which can have a measurable effect on the system. + CollectionJitter internal.Duration + // Interval at which to flush data FlushInterval internal.Duration // FlushRetries is the number of times to retry each data flush FlushRetries int - // FlushJitter tells + // FlushJitter Jitters the flush interval by a random amount. + // This is primarily to avoid large write spikes for users running a large + // number of telegraf instances. + // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s FlushJitter internal.Duration // TODO(cam): Remove UTC and Precision parameters, they are no longer @@ -271,6 +280,11 @@ var header = `# Telegraf configuration # Rounds collection interval to 'interval' # ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true + # Collection jitter is used to jitter the collection by a random amount. + # Each plugin will sleep for a random time within jitter before collecting. + # This can be used to avoid many plugins querying things like sysfs at the + # same time, which can have a measurable effect on the system. + collection_jitter = "0s" # Default data flushing interval for all outputs. You should not set this below # interval. 
Maximum flush_interval will be flush_interval + flush_jitter From d3925fe578b7748d6b02fcfdf764894f695b4416 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 19 Jan 2016 17:28:02 -0700 Subject: [PATCH 093/103] Include CPU usage percent with procstat data closes #484 --- agent.go | 2 +- plugins/inputs/procstat/procstat.go | 28 +++++++++------- plugins/inputs/procstat/procstat_test.go | 2 ++ plugins/inputs/procstat/spec_processor.go | 40 +++++++++++------------ 4 files changed, 39 insertions(+), 33 deletions(-) diff --git a/agent.go b/agent.go index 25b157c54..4a54d2172 100644 --- a/agent.go +++ b/agent.go @@ -216,7 +216,7 @@ func (a *Agent) Test() error { // Special instructions for some inputs. cpu, for example, needs to be // run twice in order to return cpu usage percentages. switch input.Name { - case "cpu", "mongodb": + case "cpu", "mongodb", "procstat": time.Sleep(500 * time.Millisecond) fmt.Printf("* Plugin: %s, Collection 2\n", input.Name) if err := input.Input.Gather(acc); err != nil { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 5e596d6d8..9f30eea83 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -18,10 +18,14 @@ type Procstat struct { Exe string Pattern string Prefix string + + pidmap map[int32]*process.Process } func NewProcstat() *Procstat { - return &Procstat{} + return &Procstat{ + pidmap: make(map[int32]*process.Process), + } } var sampleConfig = ` @@ -46,12 +50,12 @@ func (_ *Procstat) Description() string { } func (p *Procstat) Gather(acc inputs.Accumulator) error { - procs, err := p.createProcesses() + err := p.createProcesses() if err != nil { log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s", p.Exe, p.PidFile, p.Pattern, err.Error()) } else { - for _, proc := range procs { + for _, proc := range p.pidmap { p := NewSpecProcessor(p.Prefix, acc, proc) p.pushMetrics() } @@ -60,8 +64,7 @@ func (p *Procstat) 
Gather(acc inputs.Accumulator) error { return nil } -func (p *Procstat) createProcesses() ([]*process.Process, error) { - var out []*process.Process +func (p *Procstat) createProcesses() error { var errstring string var outerr error @@ -71,11 +74,14 @@ func (p *Procstat) createProcesses() ([]*process.Process, error) { } for _, pid := range pids { - p, err := process.NewProcess(int32(pid)) - if err == nil { - out = append(out, p) - } else { - errstring += err.Error() + " " + _, ok := p.pidmap[pid] + if !ok { + proc, err := process.NewProcess(pid) + if err == nil { + p.pidmap[pid] = proc + } else { + errstring += err.Error() + " " + } } } @@ -83,7 +89,7 @@ func (p *Procstat) createProcesses() ([]*process.Process, error) { outerr = fmt.Errorf("%s", errstring) } - return out, outerr + return outerr } func (p *Procstat) getAllPids() ([]int32, error) { diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 6ec6834ca..b9eb4a209 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/shirou/gopsutil/process" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +24,7 @@ func TestGather(t *testing.T) { p := Procstat{ PidFile: file.Name(), Prefix: "foo", + pidmap: make(map[int32]*process.Process), } p.Gather(&acc) assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user")) diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go index 9c7e53826..9f9ad5342 100644 --- a/plugins/inputs/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "log" + "time" "github.com/shirou/gopsutil/process" @@ -40,7 +40,7 @@ func NewSpecProcessor( tags := make(map[string]string) tags["pid"] = fmt.Sprintf("%v", p.Pid) if name, err := p.Name(); err == nil { - tags["name"] = name + tags["process_name"] 
= name } return &SpecProcessor{ Prefix: prefix, @@ -52,21 +52,11 @@ func NewSpecProcessor( } func (p *SpecProcessor) pushMetrics() { - if err := p.pushFDStats(); err != nil { - log.Printf("procstat, fd stats not available: %s", err.Error()) - } - if err := p.pushCtxStats(); err != nil { - log.Printf("procstat, ctx stats not available: %s", err.Error()) - } - if err := p.pushIOStats(); err != nil { - log.Printf("procstat, io stats not available: %s", err.Error()) - } - if err := p.pushCPUStats(); err != nil { - log.Printf("procstat, cpu stats not available: %s", err.Error()) - } - if err := p.pushMemoryStats(); err != nil { - log.Printf("procstat, mem stats not available: %s", err.Error()) - } + p.pushFDStats() + p.pushCtxStats() + p.pushIOStats() + p.pushCPUStats() + p.pushMemoryStats() p.flush() } @@ -113,10 +103,18 @@ func (p *SpecProcessor) pushCPUStats() error { p.add("cpu_time_iowait", cpu_time.Iowait) p.add("cpu_time_irq", cpu_time.Irq) p.add("cpu_time_soft_irq", cpu_time.Softirq) - p.add("cpu_time_soft_steal", cpu_time.Steal) - p.add("cpu_time_soft_stolen", cpu_time.Stolen) - p.add("cpu_time_soft_guest", cpu_time.Guest) - p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice) + p.add("cpu_time_steal", cpu_time.Steal) + p.add("cpu_time_stolen", cpu_time.Stolen) + p.add("cpu_time_guest", cpu_time.Guest) + p.add("cpu_time_guest_nice", cpu_time.GuestNice) + + cpu_perc, err := p.proc.CPUPercent(time.Duration(0)) + if err != nil { + return err + } else if cpu_perc == 0 { + return nil + } + p.add("cpu_usage", cpu_perc) return nil } From fc1aa7d3b40ad4d13fc5414b505af50317b8f35e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 10:42:55 -0700 Subject: [PATCH 094/103] Filter mount points before stats are collected fixes #440 --- etc/telegraf.conf | 2 +- plugins/inputs/system/disk.go | 25 +++++++--------- plugins/inputs/system/disk_test.go | 48 +++++++++++++++++++----------- plugins/inputs/system/mock_PS.go | 4 +-- plugins/inputs/system/ps.go | 20 
+++++++++++-- 5 files changed, 61 insertions(+), 38 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 9df2e93d5..9871ae7bc 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -90,7 +90,7 @@ [[inputs.disk]] # By default, telegraf gather stats for all mountpoints. # Setting mountpoints will restrict the stats to the specified mountpoints. - # Mountpoints=["/"] + # mount_points=["/"] # Read metrics about disk IO by device [[inputs.diskio]] diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index 5d1553dd4..de63ff0b8 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -9,7 +9,10 @@ import ( type DiskStats struct { ps PS + // Legacy support Mountpoints []string + + MountPoints []string } func (_ *DiskStats) Description() string { @@ -19,7 +22,7 @@ func (_ *DiskStats) Description() string { var diskSampleConfig = ` # By default, telegraf gather stats for all mountpoints. # Setting mountpoints will restrict the stats to the specified mountpoints. 
- # Mountpoints=["/"] + # mount_points = ["/"] ` func (_ *DiskStats) SampleConfig() string { @@ -27,25 +30,17 @@ func (_ *DiskStats) SampleConfig() string { } func (s *DiskStats) Gather(acc inputs.Accumulator) error { - disks, err := s.ps.DiskUsage() + // Legacy support: + if len(s.Mountpoints) != 0 { + s.MountPoints = s.Mountpoints + } + + disks, err := s.ps.DiskUsage(s.MountPoints) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } - var restrictMpoints bool - mPoints := make(map[string]bool) - if len(s.Mountpoints) != 0 { - restrictMpoints = true - for _, mp := range s.Mountpoints { - mPoints[mp] = true - } - } - for _, du := range disks { - _, member := mPoints[du.Path] - if restrictMpoints && !member { - continue - } tags := map[string]string{ "path": du.Path, "fstype": du.Fstype, diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go index 6ea110fef..25d991ca3 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/system/disk_test.go @@ -15,7 +15,7 @@ func TestDiskStats(t *testing.T) { var acc testutil.Accumulator var err error - du := []*disk.DiskUsageStat{ + duAll := []*disk.DiskUsageStat{ { Path: "/", Fstype: "ext4", @@ -33,8 +33,20 @@ func TestDiskStats(t *testing.T) { InodesFree: 468, }, } + duFiltered := []*disk.DiskUsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, + InodesTotal: 1234, + InodesFree: 234, + }, + } - mps.On("DiskUsage").Return(du, nil) + mps.On("DiskUsage", []string(nil)).Return(duAll, nil) + mps.On("DiskUsage", []string{"/", "/dev"}).Return(duFiltered, nil) + mps.On("DiskUsage", []string{"/", "/home"}).Return(duAll, nil) err = (&DiskStats{ps: &mps}).Gather(&acc) require.NoError(t, err) @@ -53,32 +65,32 @@ func TestDiskStats(t *testing.T) { } fields1 := map[string]interface{}{ - "total": uint64(128), //tags1) - "used": uint64(105), //tags1) - "free": uint64(23), //tags1) - "inodes_total": uint64(1234), //tags1) - "inodes_free": uint64(234), 
//tags1) - "inodes_used": uint64(1000), //tags1) + "total": uint64(128), + "used": uint64(105), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), } fields2 := map[string]interface{}{ - "total": uint64(256), //tags2) - "used": uint64(210), //tags2) - "free": uint64(46), //tags2) - "inodes_total": uint64(2468), //tags2) - "inodes_free": uint64(468), //tags2) - "inodes_used": uint64(2000), //tags2) + "total": uint64(256), + "used": uint64(210), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), } acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) // We expect 6 more DiskPoints to show up with an explicit match on "/" - // and /home not matching the /dev in Mountpoints - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc) + // and /home not matching the /dev in MountPoints + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) assert.Equal(t, expectedAllDiskPoints+6, acc.NFields()) - // We should see all the diskpoints as Mountpoints includes both + // We should see all the diskpoints as MountPoints includes both // / and /home - err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc) + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields()) } diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index 6e8bfe224..661adb2ac 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -33,8 +33,8 @@ func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { return r0, r1 } -func (m *MockPS) DiskUsage() ([]*disk.DiskUsageStat, error) { - ret := m.Called() +func (m *MockPS) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) { + ret := 
m.Called(mountPointFilter) r0 := ret.Get(0).([]*disk.DiskUsageStat) r1 := ret.Error(1) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 966747718..fceafd873 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -27,7 +27,7 @@ type DockerContainerStat struct { type PS interface { CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) - DiskUsage() ([]*disk.DiskUsageStat, error) + DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) NetIO() ([]net.NetIOCountersStat, error) NetProto() ([]net.NetProtoCountersStat, error) DiskIO() (map[string]disk.DiskIOCountersStat, error) @@ -67,15 +67,31 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { return cpuTimes, nil } -func (s *systemPS) DiskUsage() ([]*disk.DiskUsageStat, error) { +func (s *systemPS) DiskUsage( + mountPointFilter []string, +) ([]*disk.DiskUsageStat, error) { parts, err := disk.DiskPartitions(true) if err != nil { return nil, err } + // Make a "set" out of the filter slice + filterSet := make(map[string]bool) + for _, filter := range mountPointFilter { + filterSet[filter] = true + } + var usage []*disk.DiskUsageStat for _, p := range parts { + if len(mountPointFilter) > 0 { + // If the mount point is not a member of the filter set, + // don't gather info on it. 
+ _, ok := filterSet[p.Mountpoint] + if !ok { + continue + } + } if _, err := os.Stat(p.Mountpoint); err == nil { du, err := disk.DiskUsage(p.Mountpoint) if err != nil { From 0cdf1b07e9befd42f65d558a25313563708af816 Mon Sep 17 00:00:00 2001 From: Jack Zampolin Date: Wed, 20 Jan 2016 10:57:35 -0800 Subject: [PATCH 095/103] Fix issue 524 --- CHANGELOG.md | 294 +++++++++--------- CONTRIBUTING.md | 10 +- Godeps | 2 +- README.md | 4 +- accumulator.go | 4 +- agent.go | 8 +- agent_test.go | 6 +- cmd/telegraf/telegraf.go | 8 +- internal/config/config.go | 8 +- internal/config/config_test.go | 8 +- plugins/inputs/aerospike/aerospike.go | 2 +- plugins/inputs/aerospike/aerospike_test.go | 2 +- plugins/inputs/all/all.go | 72 ++--- plugins/inputs/apache/apache.go | 2 +- plugins/inputs/apache/apache_test.go | 2 +- plugins/inputs/bcache/bcache.go | 2 +- plugins/inputs/bcache/bcache_test.go | 2 +- plugins/inputs/disque/disque.go | 2 +- plugins/inputs/disque/disque_test.go | 2 +- plugins/inputs/elasticsearch/elasticsearch.go | 4 +- .../elasticsearch/elasticsearch_test.go | 2 +- plugins/inputs/exec/exec.go | 4 +- plugins/inputs/exec/exec_test.go | 2 +- plugins/inputs/haproxy/haproxy.go | 2 +- plugins/inputs/haproxy/haproxy_test.go | 2 +- plugins/inputs/httpjson/httpjson.go | 4 +- plugins/inputs/httpjson/httpjson_test.go | 2 +- plugins/inputs/influxdb/influxdb.go | 2 +- plugins/inputs/influxdb/influxdb_test.go | 4 +- plugins/inputs/jolokia/jolokia.go | 2 +- plugins/inputs/jolokia/jolokia_test.go | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 4 +- .../kafka_consumer_integration_test.go | 2 +- .../kafka_consumer/kafka_consumer_test.go | 4 +- plugins/inputs/leofs/leofs.go | 2 +- plugins/inputs/leofs/leofs_test.go | 2 +- plugins/inputs/lustre2/lustre2.go | 4 +- plugins/inputs/lustre2/lustre2_test.go | 2 +- plugins/inputs/mailchimp/mailchimp.go | 2 +- plugins/inputs/mailchimp/mailchimp_test.go | 2 +- plugins/inputs/memcached/memcached.go | 2 +- 
plugins/inputs/memcached/memcached_test.go | 2 +- plugins/inputs/mongodb/mongodb.go | 2 +- plugins/inputs/mongodb/mongodb_data.go | 2 +- plugins/inputs/mongodb/mongodb_data_test.go | 2 +- plugins/inputs/mongodb/mongodb_server.go | 2 +- plugins/inputs/mongodb/mongodb_server_test.go | 2 +- plugins/inputs/mysql/mysql.go | 2 +- plugins/inputs/mysql/mysql_test.go | 2 +- plugins/inputs/nginx/nginx.go | 2 +- plugins/inputs/nginx/nginx_test.go | 2 +- plugins/inputs/nsq/nsq.go | 2 +- plugins/inputs/nsq/nsq_test.go | 2 +- plugins/inputs/passenger/passenger.go | 2 +- plugins/inputs/passenger/passenger_test.go | 2 +- plugins/inputs/phpfpm/phpfpm.go | 2 +- plugins/inputs/phpfpm/phpfpm_test.go | 2 +- plugins/inputs/ping/ping.go | 2 +- plugins/inputs/ping/ping_test.go | 2 +- plugins/inputs/postgresql/postgresql.go | 2 +- plugins/inputs/postgresql/postgresql_test.go | 2 +- plugins/inputs/procstat/procstat.go | 2 +- plugins/inputs/procstat/procstat_test.go | 2 +- plugins/inputs/procstat/spec_processor.go | 2 +- plugins/inputs/prometheus/prometheus.go | 2 +- plugins/inputs/prometheus/prometheus_test.go | 2 +- plugins/inputs/puppetagent/puppetagent.go | 2 +- .../inputs/puppetagent/puppetagent_test.go | 2 +- plugins/inputs/rabbitmq/rabbitmq.go | 2 +- plugins/inputs/rabbitmq/rabbitmq_test.go | 2 +- plugins/inputs/redis/redis.go | 2 +- plugins/inputs/redis/redis_test.go | 2 +- plugins/inputs/rethinkdb/rethinkdb.go | 2 +- plugins/inputs/rethinkdb/rethinkdb_data.go | 2 +- .../inputs/rethinkdb/rethinkdb_data_test.go | 2 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 2 +- .../inputs/rethinkdb/rethinkdb_server_test.go | 2 +- plugins/inputs/sensors/sensors.go | 2 +- plugins/inputs/statsd/README.md | 2 +- plugins/inputs/statsd/statsd.go | 4 +- plugins/inputs/statsd/statsd_test.go | 2 +- plugins/inputs/system/cpu.go | 2 +- plugins/inputs/system/cpu_test.go | 2 +- plugins/inputs/system/disk.go | 2 +- plugins/inputs/system/disk_test.go | 2 +- plugins/inputs/system/docker.go | 2 +- 
plugins/inputs/system/docker_test.go | 2 +- plugins/inputs/system/memory.go | 2 +- plugins/inputs/system/memory_test.go | 2 +- plugins/inputs/system/net.go | 2 +- plugins/inputs/system/net_test.go | 2 +- plugins/inputs/system/netstat.go | 2 +- plugins/inputs/system/ps.go | 4 +- plugins/inputs/system/system.go | 2 +- plugins/inputs/trig/trig.go | 2 +- plugins/inputs/trig/trig_test.go | 2 +- plugins/inputs/twemproxy/twemproxy.go | 2 +- plugins/inputs/twemproxy/twemproxy_test.go | 2 +- plugins/inputs/zfs/zfs.go | 4 +- plugins/inputs/zfs/zfs_test.go | 2 +- plugins/inputs/zookeeper/zookeeper.go | 2 +- plugins/inputs/zookeeper/zookeeper_test.go | 2 +- plugins/outputs/all/all.go | 26 +- plugins/outputs/amon/amon.go | 6 +- plugins/outputs/amon/amon_test.go | 4 +- plugins/outputs/amqp/amqp.go | 4 +- plugins/outputs/amqp/amqp_test.go | 2 +- plugins/outputs/datadog/datadog.go | 6 +- plugins/outputs/datadog/datadog_test.go | 4 +- plugins/outputs/graphite/graphite.go | 4 +- plugins/outputs/graphite/graphite_test.go | 2 +- plugins/outputs/influxdb/influxdb.go | 6 +- plugins/outputs/influxdb/influxdb_test.go | 2 +- plugins/outputs/kafka/kafka.go | 4 +- plugins/outputs/kafka/kafka_test.go | 2 +- plugins/outputs/kinesis/kinesis.go | 4 +- plugins/outputs/kinesis/kinesis_test.go | 2 +- plugins/outputs/librato/librato.go | 6 +- plugins/outputs/librato/librato_test.go | 4 +- plugins/outputs/mqtt/mqtt.go | 6 +- plugins/outputs/mqtt/mqtt_test.go | 2 +- plugins/outputs/nsq/nsq.go | 4 +- plugins/outputs/nsq/nsq_test.go | 2 +- plugins/outputs/opentsdb/opentsdb.go | 4 +- plugins/outputs/opentsdb/opentsdb_test.go | 2 +- .../prometheus_client/prometheus_client.go | 4 +- .../prometheus_client_test.go | 6 +- plugins/outputs/registry.go | 2 +- plugins/outputs/riemann/riemann.go | 4 +- plugins/outputs/riemann/riemann_test.go | 2 +- scripts/Vagrantfile | 4 +- scripts/circle-test.sh | 6 +- scripts/telegraf.service | 2 +- testutil/testutil.go | 2 +- 134 files changed, 383 insertions(+), 383 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9319c775e..e86c74ada 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ ## v0.10.1 [unreleased] ### Features -- [#509](https://github.com/influxdb/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! +- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! - [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454! @@ -12,14 +12,14 @@ - [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert! - [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain! - [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod! -- [#551](https://github.com/influxdb/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. +- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. - [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. ### Bugfixes -- [#506](https://github.com/influxdb/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! -- [#508](https://github.com/influxdb/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin +- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
+- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! -- [#543](https://github.com/influxdb/telegraf/issues/543): Statsd Packet size sometimes truncated. +- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated. ## v0.10.0 [2016-01-12] @@ -62,29 +62,29 @@ configurations overwritten by the upgrade. There is a backup stored at ## v0.2.5 [unreleased] ### Features -- [#427](https://github.com/influxdb/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! -- [#428](https://github.com/influxdb/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! -- [#449](https://github.com/influxdb/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff +- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! +- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! +- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bugfixes -- [#430](https://github.com/influxdb/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! -- [#452](https://github.com/influxdb/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! +- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! +- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features -- [#412](https://github.com/influxdb/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! 
-- [#410](https://github.com/influxdb/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! -- [#414](https://github.com/influxdb/telegraf/issues/414): Jolokia plugin auth parameters -- [#415](https://github.com/influxdb/telegraf/issues/415): memcached plugin: support unix sockets -- [#418](https://github.com/influxdb/telegraf/pull/418): memcached plugin additional unit tests. -- [#408](https://github.com/influxdb/telegraf/pull/408): MailChimp plugin. -- [#382](https://github.com/influxdb/telegraf/pull/382): Add system wide network protocol stats to `net` plugin. -- [#401](https://github.com/influxdb/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! +- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! +- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! +- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters +- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets +- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests. +- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin. +- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin. +- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bugfixes -- [#405](https://github.com/influxdb/telegraf/issues/405): Prometheus output cardinality issue -- [#388](https://github.com/influxdb/telegraf/issues/388): Fix collection hangup when cpu times decrement. +- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue +- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. 
## v0.2.3 [2015-11-30] @@ -113,15 +113,15 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features -- [#379](https://github.com/influxdb/telegraf/pull/379): Riemann output, thanks @allenj! -- [#375](https://github.com/influxdb/telegraf/pull/375): kafka_consumer service plugin. -- [#392](https://github.com/influxdb/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! -- [#383](https://github.com/influxdb/telegraf/pull/383): Specify plugins as a list. -- [#354](https://github.com/influxdb/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! +- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! +- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. +- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! +- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list. +- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bugfixes -- [#371](https://github.com/influxdb/telegraf/issues/371): Kafka consumer plugin not functioning. -- [#389](https://github.com/influxdb/telegraf/issues/389): NaN value panic +- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. +- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] @@ -130,7 +130,7 @@ same type can be specified, like this: lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bugfixes -- [#377](https://github.com/influxdb/telegraf/pull/377): Fix for duplicate slices in inputs. +- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. 
## v0.2.1 [2015-11-16] @@ -147,22 +147,22 @@ changed to just run docker commands in the Makefile. See `make docker-run` and same type. ### Features -- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive! -- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! -- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! -- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello! -- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output. -- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc -- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot! -- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output. -- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists. -- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! +- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive! +- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! +- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! +- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello! +- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output. +- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc +- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot! +- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output. +- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists. 
+- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! ### Bugfixes -- [#331](https://github.com/influxdb/telegraf/pull/331): Dont overwrite host tag in redis plugin. -- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements. -- [#351](https://github.com/influxdb/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes -- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo! +- [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin. +- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements. +- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes +- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo! ## v0.2.0 [2015-10-27] @@ -183,38 +183,38 @@ be controlled via the `round_interval` and `flush_jitter` config options. - Telegraf will now retry metric flushes twice ### Features -- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info -- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini -- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin -- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee! -- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay! -- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou! +- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info +- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. 
Thanks @ekini +- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin +- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee! +- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay! +- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou! - Memory plugin: cached and buffered measurements re-added - Logging: additional logging for each collection interval, track the number of metrics collected and from how many inputs. -- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib! -- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou! -- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive! -- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc -- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thats @jrxFive! -- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2. -- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points. -- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot! -- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini! -- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals -- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes -- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter! -- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham! +- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib! 
+- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou! +- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive! +- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc +- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thats @jrxFive! +- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2. +- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points. +- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot! +- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini! +- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals +- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes +- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter! +- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham! ### Bugfixes -- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! -- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! -- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! -- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini! -- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac! -- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings. -- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags. 
-- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi! +- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! +- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! +- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! +- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini! +- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac! +- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings. +- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags. +- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi! ## v0.1.9 [2015-09-22] @@ -240,27 +240,27 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features -- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support -- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! -- [#203](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini! -- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl! -- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup. -- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee! 
-- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks +- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support +- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! +- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! +- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl! +- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup. +- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee! +- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks and filtering when specifying a config file. ### Bugfixes -- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support -- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics -- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug +- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support +- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics +- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug - Fix net plugin on darwin -- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! -- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! -- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ -- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini! -- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux. 
-- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini! -- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini! +- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! +- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! +- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ +- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini! +- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux. +- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini! +- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini! ## v0.1.8 [2015-09-04] @@ -269,106 +269,106 @@ and filtering when specifying a config file. - Now using Go 1.5 to build telegraf ### Features -- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin -- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 -- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes -- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 -- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option -- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3 -- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin +- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin +- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. 
Thanks @KPACHbIuLLIAnO4 +- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes +- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 +- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option +- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3 +- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin ### Bugfixes ## v0.1.7 [2015-08-28] ### Features -- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer. -- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! -- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. -- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space -- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag. -- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface +- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. +- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! +- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. +- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space +- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag. +- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface - Indent the toml config file for readability ### Bugfixes -- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. 
-- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. -- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! -- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! +- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. +- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. +- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! +- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! ## v0.1.6 [2015-08-20] ### Features -- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham! -- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies -- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! +- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! +- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies +- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bugfixes -- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility -- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! -- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! 
-- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error +- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility +- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! +- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! +- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error ## v0.1.5 [2015-08-13] ### Features -- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! -- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! -- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! -- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk! -- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh! -- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database. -- [#79](https://github.com/influxdb/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc! -- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks srfraser! -- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing -- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales! -- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! -- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! -- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! -- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! 
-- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. -- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! +- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! +- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! +- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! +- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks vadimtk! +- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh! +- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database. +- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc! +- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks srfraser! +- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing +- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales! +- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! +- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! +- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! +- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! +- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. +- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! 
### Bugfixes -- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users -- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes -- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama -- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally -- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format +- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users +- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes +- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama +- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally +- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format ## v0.1.4 [2015-07-09] ### Features -- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! +- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! ### Bugfixes -- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! -- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! +- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! +- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! ## v0.1.3 [2015-07-05] ### Features -- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! -- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. 
Thanks @jipperinbham! +- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! +- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham! ### Bugfixes -- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! -- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! +- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! +- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! ## v0.1.2 [2015-07-01] ### Features -- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! -- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. -- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! -- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! +- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! +- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. +- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! +- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bugfixes -- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script. -- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! -- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! 
-- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer! -- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff! +- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. +- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! +- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! +- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer! +- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff! ## v0.1.1 [2015-06-19] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a47ad2f17..dfe6a77f4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -19,7 +19,7 @@ and submit new inputs. * Input Plugins should call `inputs.Add` in their `init` function to register themselves. See below for a quick example. * Input Plugins must be added to the -`github.com/influxdb/telegraf/plugins/inputs/all/all.go` file. +`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this plugin does. @@ -75,7 +75,7 @@ package simple // simple.go -import "github.com/influxdb/telegraf/plugins/inputs" +import "github.com/influxdata/telegraf/plugins/inputs" type Simple struct { Ok bool @@ -147,7 +147,7 @@ similar constructs. * Outputs should call `outputs.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdb/telegraf/plugins/outputs/all/all.go` file. 
+`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. * The `SampleConfig` function should return valid toml that describes how the output can be configured. This is include in `telegraf -sample-config`. * The `Description` function should say in one line what this output does. @@ -171,7 +171,7 @@ package simpleoutput // simpleoutput.go -import "github.com/influxdb/telegraf/plugins/outputs" +import "github.com/influxdata/telegraf/plugins/outputs" type Simple struct { Ok bool @@ -252,7 +252,7 @@ which would take some time to replicate. To overcome this situation we've decided to use docker containers to provide a fast and reproducible environment to test those services which require it. For other situations -(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go) +(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/redis/redis_test.go) a simple mock will suffice. To execute Telegraf tests follow these simple steps: diff --git a/Godeps b/Godeps index f719539f5..c62d6feda 100644 --- a/Godeps +++ b/Godeps @@ -22,7 +22,7 @@ github.com/hailocab/go-hostpool 50839ee41f32bfca8d03a183031aa634b2dc1c64 github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 github.com/hashicorp/raft b95f335efee1992886864389183ebda0c0a5d0f6 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee -github.com/influxdb/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 +github.com/influxdata/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f diff --git a/README.md b/README.md index 80c739d03..5a997d363 100644 --- a/README.md +++ b/README.md @@ -77,8 +77,8 @@ if you don't have it already. You also must build with golang version 1.5+. 1. [Install Go](https://golang.org/doc/install) 2. 
[Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) -3. Run `go get github.com/influxdb/telegraf` -4. Run `cd $GOPATH/src/github.com/influxdb/telegraf` +3. Run `go get github.com/influxdata/telegraf` +4. Run `cd $GOPATH/src/github.com/influxdata/telegraf` 5. Run `make` ### How to use it: diff --git a/accumulator.go b/accumulator.go index 429f3a42c..c628907d7 100644 --- a/accumulator.go +++ b/accumulator.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/config" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) type Accumulator interface { diff --git a/agent.go b/agent.go index 4a54d2172..5425fba33 100644 --- a/agent.go +++ b/agent.go @@ -10,11 +10,11 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/internal/config" - "github.com/influxdb/telegraf/plugins/inputs" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) // Agent runs telegraf and collects data based on the given config diff --git a/agent_test.go b/agent_test.go index 1cb020c7b..3420e665a 100644 --- a/agent_test.go +++ b/agent_test.go @@ -5,12 +5,12 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/config" // needing to load the plugins - _ "github.com/influxdb/telegraf/plugins/inputs/all" + _ "github.com/influxdata/telegraf/plugins/inputs/all" // needing to load the outputs - _ "github.com/influxdb/telegraf/plugins/outputs/all" + _ "github.com/influxdata/telegraf/plugins/outputs/all" ) func TestAgent_LoadPlugin(t *testing.T) { diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 554569012..72fb9fdcf 100644 --- a/cmd/telegraf/telegraf.go +++ 
b/cmd/telegraf/telegraf.go @@ -9,10 +9,10 @@ import ( "strings" "syscall" - "github.com/influxdb/telegraf" - "github.com/influxdb/telegraf/internal/config" - _ "github.com/influxdb/telegraf/plugins/inputs/all" - _ "github.com/influxdb/telegraf/plugins/outputs/all" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/config" + _ "github.com/influxdata/telegraf/plugins/inputs/all" + _ "github.com/influxdata/telegraf/plugins/outputs/all" ) var fDebug = flag.Bool("debug", false, diff --git a/internal/config/config.go b/internal/config/config.go index e4d7fbc9d..3b5e4ff17 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -10,14 +10,14 @@ import ( "strings" "time" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/naoina/toml" "github.com/naoina/toml/ast" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) // Config specifies the URL/user/password for the database that telegraf diff --git a/internal/config/config_test.go b/internal/config/config_test.go index c8ed79bdf..40af30c1e 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/plugins/inputs" - "github.com/influxdb/telegraf/plugins/inputs/exec" - "github.com/influxdb/telegraf/plugins/inputs/memcached" - "github.com/influxdb/telegraf/plugins/inputs/procstat" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/exec" + "github.com/influxdata/telegraf/plugins/inputs/memcached" + "github.com/influxdata/telegraf/plugins/inputs/procstat" "github.com/stretchr/testify/assert" ) diff --git a/plugins/inputs/aerospike/aerospike.go 
b/plugins/inputs/aerospike/aerospike.go index 5f847ebfa..aa015a4c0 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "net" "strconv" "strings" diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 3f4d909a2..74b70eb1d 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index c9e8ea4c8..cb83dfdf9 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -1,40 +1,40 @@ package all import ( - _ "github.com/influxdb/telegraf/plugins/inputs/aerospike" - _ "github.com/influxdb/telegraf/plugins/inputs/apache" - _ "github.com/influxdb/telegraf/plugins/inputs/bcache" - _ "github.com/influxdb/telegraf/plugins/inputs/disque" - _ "github.com/influxdb/telegraf/plugins/inputs/elasticsearch" - _ "github.com/influxdb/telegraf/plugins/inputs/exec" - _ "github.com/influxdb/telegraf/plugins/inputs/haproxy" - _ "github.com/influxdb/telegraf/plugins/inputs/httpjson" - _ "github.com/influxdb/telegraf/plugins/inputs/influxdb" - _ "github.com/influxdb/telegraf/plugins/inputs/jolokia" - _ "github.com/influxdb/telegraf/plugins/inputs/kafka_consumer" - _ "github.com/influxdb/telegraf/plugins/inputs/leofs" - _ "github.com/influxdb/telegraf/plugins/inputs/lustre2" - _ "github.com/influxdb/telegraf/plugins/inputs/mailchimp" - _ "github.com/influxdb/telegraf/plugins/inputs/memcached" - _ "github.com/influxdb/telegraf/plugins/inputs/mongodb" - _ "github.com/influxdb/telegraf/plugins/inputs/mysql" - _ 
"github.com/influxdb/telegraf/plugins/inputs/nginx" - _ "github.com/influxdb/telegraf/plugins/inputs/nsq" - _ "github.com/influxdb/telegraf/plugins/inputs/passenger" - _ "github.com/influxdb/telegraf/plugins/inputs/phpfpm" - _ "github.com/influxdb/telegraf/plugins/inputs/ping" - _ "github.com/influxdb/telegraf/plugins/inputs/postgresql" - _ "github.com/influxdb/telegraf/plugins/inputs/procstat" - _ "github.com/influxdb/telegraf/plugins/inputs/prometheus" - _ "github.com/influxdb/telegraf/plugins/inputs/puppetagent" - _ "github.com/influxdb/telegraf/plugins/inputs/rabbitmq" - _ "github.com/influxdb/telegraf/plugins/inputs/redis" - _ "github.com/influxdb/telegraf/plugins/inputs/rethinkdb" - _ "github.com/influxdb/telegraf/plugins/inputs/sensors" - _ "github.com/influxdb/telegraf/plugins/inputs/statsd" - _ "github.com/influxdb/telegraf/plugins/inputs/system" - _ "github.com/influxdb/telegraf/plugins/inputs/trig" - _ "github.com/influxdb/telegraf/plugins/inputs/twemproxy" - _ "github.com/influxdb/telegraf/plugins/inputs/zfs" - _ "github.com/influxdb/telegraf/plugins/inputs/zookeeper" + _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" + _ "github.com/influxdata/telegraf/plugins/inputs/apache" + _ "github.com/influxdata/telegraf/plugins/inputs/bcache" + _ "github.com/influxdata/telegraf/plugins/inputs/disque" + _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/inputs/exec" + _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" + _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/leofs" + _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" + _ 
"github.com/influxdata/telegraf/plugins/inputs/memcached" + _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" + _ "github.com/influxdata/telegraf/plugins/inputs/mysql" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx" + _ "github.com/influxdata/telegraf/plugins/inputs/nsq" + _ "github.com/influxdata/telegraf/plugins/inputs/passenger" + _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" + _ "github.com/influxdata/telegraf/plugins/inputs/ping" + _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" + _ "github.com/influxdata/telegraf/plugins/inputs/procstat" + _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" + _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" + _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" + _ "github.com/influxdata/telegraf/plugins/inputs/redis" + _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" + _ "github.com/influxdata/telegraf/plugins/inputs/sensors" + _ "github.com/influxdata/telegraf/plugins/inputs/statsd" + _ "github.com/influxdata/telegraf/plugins/inputs/system" + _ "github.com/influxdata/telegraf/plugins/inputs/trig" + _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" + _ "github.com/influxdata/telegraf/plugins/inputs/zfs" + _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" ) diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index f48bac336..317a635d3 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Apache struct { diff --git a/plugins/inputs/apache/apache_test.go b/plugins/inputs/apache/apache_test.go index 16c319974..8eed61ca6 100644 --- a/plugins/inputs/apache/apache_test.go +++ b/plugins/inputs/apache/apache_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + 
"github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 146849eef..b6d6eb130 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Bcache struct { diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index 0f34d016b..bd191528f 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 334fdd554..364e78fbc 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Disque struct { diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go index 91c7dc979..f060e9568 100644 --- a/plugins/inputs/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -6,7 +6,7 @@ import ( "net" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index f8185a053..9b59537c0 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -6,8 +6,8 @@ import ( "net/http" "time" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + 
"github.com/influxdata/telegraf/plugins/inputs" ) const statsPath = "/_nodes/stats" diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 62c3cb8fd..f94d3f9ac 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index b3c1001f8..603ba1464 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -8,8 +8,8 @@ import ( "github.com/gonuts/go-shellquote" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 64fd69fce..8bf47c1d0 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 23b92fc26..c2e334424 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -3,7 +3,7 @@ package haproxy import ( "encoding/csv" "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "io" "net/http" "net/url" diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index e514bc7ad..7b86f2b50 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -5,7 +5,7 @@ import ( "strings" 
"testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "net/http" diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 5763fd6fa..b90a02e5b 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -11,8 +11,8 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) type HttpJson struct { diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 3f14290ff..0ea5e9e42 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index cf5742e1d..e65c8afd2 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -8,7 +8,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type InfluxDB struct { diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 0e02cc6bd..e7b43e7bc 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -5,8 +5,8 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/plugins/inputs/influxdb" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/plugins/inputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git 
a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 36811bd27..7579ecb4a 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -8,7 +8,7 @@ import ( "net/http" "net/url" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Server struct { diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index d29b8a810..63b47ebff 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" _ "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index f3558e2e5..a0f1d3d11 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -5,8 +5,8 @@ import ( "strings" "sync" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/Shopify/sarama" "github.com/wvanbergen/kafka/consumergroup" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go index 9f554d9ab..0611467ff 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 
dcd38f6c4..560e130c0 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/telegraf/testutil" "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index c65db5f37..f4dd314b7 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -3,7 +3,7 @@ package leofs import ( "bufio" "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "net/url" "os/exec" "strconv" diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index 48a82a18a..292cd15d0 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,7 +1,7 @@ package leofs import ( - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "io/ioutil" diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 90222af79..d6266de73 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -13,8 +13,8 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) // Lustre proc files can change between versions, so we want to future-proof diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index cea98fa1e..9e560df2c 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - 
"github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index 4b148a95c..284ac61e1 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type MailChimp struct { diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 5e5394581..0c4dab56d 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,7 +7,7 @@ import ( "net/url" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 1d9ee9547..078f05aa3 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -8,7 +8,7 @@ import ( "strconv" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) // Memcached is a memcached plugin diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 6e2f8452a..210adffdb 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4cb3ffee5..ce73c3a14 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -9,7 +9,7 @@ import ( "sync" "time" - 
"github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/mgo.v2" ) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 15f8c479b..c0c68c330 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -5,7 +5,7 @@ import ( "reflect" "strconv" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type MongodbData struct { diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 5d24a7a09..3166ab018 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 795cf97d7..87552f906 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,7 +4,7 @@ import ( "net/url" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index ec536bbef..52869724c 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index f9126b5ea..7434a282a 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -6,7 +6,7 @@ import ( "strings" _ 
"github.com/go-sql-driver/mysql" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Mysql struct { diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 2362002bc..855e8ba52 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 18e3244f7..6ea665b7e 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Nginx struct { diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index 9d694bc26..895e3e583 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 48a709a37..9b680a0db 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -31,7 +31,7 @@ import ( "sync" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) // Might add Lookupd endpoints for cluster discovery diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index fc34a710b..23fd19a42 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -7,7 +7,7 @@ import ( "net/url" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" 
"github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 2d98f8c58..c5b049b7c 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/net/html/charset" ) diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index 3440c5337..6124a968e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 5600334b2..0166f7bea 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -12,7 +12,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) const ( diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 58db0cf8b..c965e5a13 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -10,7 +10,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index ff7cebb99..aa1d5bf36 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) // HostPinger is a function that runs the "ping" 
function using a list of diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index b98a08be8..be603a49c 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -6,7 +6,7 @@ import ( "sort" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index c356cea77..3398f5ac0 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -6,7 +6,7 @@ import ( "fmt" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/lib/pq" ) diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 0f4ff5579..8baae39a6 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 9f30eea83..aa56bd501 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -10,7 +10,7 @@ import ( "github.com/shirou/gopsutil/process" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Procstat struct { diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index b9eb4a209..bf5790f67 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/influxdb/telegraf/testutil" + 
"github.com/influxdata/telegraf/testutil" ) func TestGather(t *testing.T) { diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go index 9f9ad5342..b66572f2e 100644 --- a/plugins/inputs/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -6,7 +6,7 @@ import ( "github.com/shirou/gopsutil/process" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type SpecProcessor struct { diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 758788b8d..e6374b8d6 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -3,7 +3,7 @@ package prometheus import ( "errors" "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "io" diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 901fe2da2..2009cbb11 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index d0bedae9e..eee9186b3 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -8,7 +8,7 @@ import ( "reflect" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) // PuppetAgent is a PuppetAgent plugin diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index 1d854ab46..d1470bc27 100644 --- 
a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -1,7 +1,7 @@ package puppetagent import ( - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "testing" ) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index fc95af494..c062b3164 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) const DefaultUsername = "guest" diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 12b7aee70..4bdc980db 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index c9e98e886..735aa2052 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Redis struct { diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index ec0cf998c..612595cdb 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 17873f1ce..1f28dab25 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ 
-5,7 +5,7 @@ import ( "net/url" "sync" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) diff --git a/plugins/inputs/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go index 3ea429d82..8093fa5ba 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -4,7 +4,7 @@ import ( "reflect" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type serverStatus struct { diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index 3441370a3..6159016c0 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -3,7 +3,7 @@ package rethinkdb import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index 4af916a4d..6ca7a3af1 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/dancannon/gorethink.v1" ) diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 21ab0dbbd..c4b644222 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -5,7 +5,7 @@ package rethinkdb import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 
18e26278d..81001abd8 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -7,7 +7,7 @@ import ( "github.com/md14454/gosensors" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Sensors struct { diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 76255f3b0..49b8ff842 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -157,4 +157,4 @@ mem.cached.localhost:256|g ``` There are many more options available, -[More details can be found here](https://github.com/influxdb/influxdb/tree/master/services/graphite#templates) +[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index d9f597bcf..1fac4aba0 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -10,9 +10,9 @@ import ( "strings" "sync" - "github.com/influxdb/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/graphite" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) const UDP_PACKET_SIZE int = 1500 diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index a8aae2e9e..6fc1f6933 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" ) // Invalid lines should return an error diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 298df20bb..95c854b2c 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/shirou/gopsutil/cpu" ) diff --git 
a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go index c85734adc..77d90e2a5 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/system/cpu_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/cpu" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index de63ff0b8..c6b23492b 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -3,7 +3,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type DiskStats struct { diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go index 25d991ca3..ec4182cb3 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/system/disk_test.go @@ -3,7 +3,7 @@ package system import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/plugins/inputs/system/docker.go b/plugins/inputs/system/docker.go index 3a77fad5f..4f60c771e 100644 --- a/plugins/inputs/system/docker.go +++ b/plugins/inputs/system/docker.go @@ -5,7 +5,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type DockerStats struct { diff --git a/plugins/inputs/system/docker_test.go b/plugins/inputs/system/docker_test.go index 9ed06dd3e..6f680d8be 100644 --- a/plugins/inputs/system/docker_test.go +++ b/plugins/inputs/system/docker_test.go @@ -5,7 +5,7 @@ package system import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/docker" diff 
--git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go index f58a8cd92..32a2f2b09 100644 --- a/plugins/inputs/system/memory.go +++ b/plugins/inputs/system/memory.go @@ -3,7 +3,7 @@ package system import ( "fmt" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type MemStats struct { diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/system/memory_test.go index bf461e2e2..0a85bc869 100644 --- a/plugins/inputs/system/memory_test.go +++ b/plugins/inputs/system/memory_test.go @@ -3,7 +3,7 @@ package system import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/mem" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index 42f0d5854..7f71f5200 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -5,7 +5,7 @@ import ( "net" "strings" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type NetIOStats struct { diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/system/net_test.go index 3ec2cb990..3297acf07 100644 --- a/plugins/inputs/system/net_test.go +++ b/plugins/inputs/system/net_test.go @@ -4,7 +4,7 @@ import ( "syscall" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/net" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/system/netstat.go b/plugins/inputs/system/netstat.go index 71f2a0da6..0fe704ee0 100644 --- a/plugins/inputs/system/netstat.go +++ b/plugins/inputs/system/netstat.go @@ -4,7 +4,7 @@ import ( "fmt" "syscall" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type NetStats struct { diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index fceafd873..17971c5f5 100644 --- 
a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,8 +5,8 @@ import ( "os" "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 813ab84f5..4a0a76d48 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -8,7 +8,7 @@ import ( "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type SystemStats struct{} diff --git a/plugins/inputs/trig/trig.go b/plugins/inputs/trig/trig.go index 13c44e247..604f9734a 100644 --- a/plugins/inputs/trig/trig.go +++ b/plugins/inputs/trig/trig.go @@ -3,7 +3,7 @@ package trig import ( "math" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Trig struct { diff --git a/plugins/inputs/trig/trig_test.go b/plugins/inputs/trig/trig_test.go index 82605b0a5..1471edbea 100644 --- a/plugins/inputs/trig/trig_test.go +++ b/plugins/inputs/trig/trig_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" ) func TestTrig(t *testing.T) { diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index 95c9d0ba0..6dcce8058 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -7,7 +7,7 @@ import ( "net" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) type Twemproxy struct { diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go index 60209d1a1..dd79048e0 100644 --- 
a/plugins/inputs/twemproxy/twemproxy_test.go +++ b/plugins/inputs/twemproxy/twemproxy_test.go @@ -5,7 +5,7 @@ import ( "net" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 109b261f8..13f2d9806 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) type Zfs struct { diff --git a/plugins/inputs/zfs/zfs_test.go b/plugins/inputs/zfs/zfs_test.go index 9530084d0..e40d91c02 100644 --- a/plugins/inputs/zfs/zfs_test.go +++ b/plugins/inputs/zfs/zfs_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 93a07840d..c2940f5e3 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/influxdb/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs" ) // Zookeeper is a zookeeper plugin diff --git a/plugins/inputs/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go index 354382ecc..bc02ffb9d 100644 --- a/plugins/inputs/zookeeper/zookeeper_test.go +++ b/plugins/inputs/zookeeper/zookeeper_test.go @@ -3,7 +3,7 @@ package zookeeper import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 7eedb592a..e05d53acc 100644 --- 
a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -1,17 +1,17 @@ package all import ( - _ "github.com/influxdb/telegraf/plugins/outputs/amon" - _ "github.com/influxdb/telegraf/plugins/outputs/amqp" - _ "github.com/influxdb/telegraf/plugins/outputs/datadog" - _ "github.com/influxdb/telegraf/plugins/outputs/graphite" - _ "github.com/influxdb/telegraf/plugins/outputs/influxdb" - _ "github.com/influxdb/telegraf/plugins/outputs/kafka" - _ "github.com/influxdb/telegraf/plugins/outputs/kinesis" - _ "github.com/influxdb/telegraf/plugins/outputs/librato" - _ "github.com/influxdb/telegraf/plugins/outputs/mqtt" - _ "github.com/influxdb/telegraf/plugins/outputs/nsq" - _ "github.com/influxdb/telegraf/plugins/outputs/opentsdb" - _ "github.com/influxdb/telegraf/plugins/outputs/prometheus_client" - _ "github.com/influxdb/telegraf/plugins/outputs/riemann" + _ "github.com/influxdata/telegraf/plugins/outputs/amon" + _ "github.com/influxdata/telegraf/plugins/outputs/amqp" + _ "github.com/influxdata/telegraf/plugins/outputs/datadog" + _ "github.com/influxdata/telegraf/plugins/outputs/graphite" + _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/outputs/kafka" + _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" + _ "github.com/influxdata/telegraf/plugins/outputs/librato" + _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" + _ "github.com/influxdata/telegraf/plugins/outputs/nsq" + _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" + _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" + _ "github.com/influxdata/telegraf/plugins/outputs/riemann" ) diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index d8fceb035..e9f2c9f30 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -8,9 +8,9 @@ import ( "net/http" "strings" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - 
"github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Amon struct { diff --git a/plugins/outputs/amon/amon_test.go b/plugins/outputs/amon/amon_test.go index cfe4e9f23..b725bab9e 100644 --- a/plugins/outputs/amon/amon_test.go +++ b/plugins/outputs/amon/amon_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) func TestBuildPoint(t *testing.T) { diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index e1d6302a1..bdbf47b86 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -10,8 +10,8 @@ import ( "sync" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/streadway/amqp" ) diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go index 4c6a9a8d3..a65634cab 100644 --- a/plugins/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -3,7 +3,7 @@ package amqp import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 4231b1f28..7d6539789 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -10,9 +10,9 @@ import ( "sort" "strings" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Datadog 
struct { diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index fe0b7c1fe..968a8e9c8 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index dd2af8eb1..f9781041f 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -3,8 +3,8 @@ package graphite import ( "errors" "fmt" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "log" "math/rand" "net" diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index e9000c3c7..be4cc2472 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index f6b79b009..f45f020b6 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -9,9 +9,9 @@ import ( "strings" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type InfluxDB struct { diff --git 
a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index cf1d7d9b3..5da0c056f 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 55ef35fb4..b16347c92 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" "github.com/Shopify/sarama" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "io/ioutil" ) diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 2c1734857..2af343778 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -3,7 +3,7 @@ package kafka import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 11e26fdf9..f04f1c7c6 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -15,8 +15,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type KinesisOutput struct { diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 4c667c860..76eb6ebca 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -1,7 +1,7 
@@ package kinesis import ( - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" "testing" ) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 75aecb756..6afcb4542 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -7,9 +7,9 @@ import ( "log" "net/http" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) type Librato struct { diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index 129352027..25418baa5 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index a70b2e575..7c47cf741 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -10,9 +10,9 @@ import ( "sync" paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/internal" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) const MaxClientIdLen = 8 diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 0922b83ed..f25f4497f 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -3,7 +3,7 @@ package mqtt import ( 
"testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index db58670a2..79818ec5c 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,8 +2,8 @@ package nsq import ( "fmt" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/nsqio/go-nsq" ) diff --git a/plugins/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go index 4448383f4..b2d703a70 100644 --- a/plugins/outputs/nsq/nsq_test.go +++ b/plugins/outputs/nsq/nsq_test.go @@ -3,7 +3,7 @@ package nsq import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 236385d71..6e9f3e26a 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type OpenTSDB struct { diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index f75bd7205..92df3fb52 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 1fbf9056a..4e429722a 100644 --- 
a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,8 +5,8 @@ import ( "log" "net/http" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" ) diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index dc353486c..73163ee1d 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -5,9 +5,9 @@ import ( "github.com/stretchr/testify/require" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/inputs/prometheus" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/inputs/prometheus" + "github.com/influxdata/telegraf/testutil" ) var pTesting *PrometheusClient diff --git a/plugins/outputs/registry.go b/plugins/outputs/registry.go index d7ea30492..d4c6ba1e5 100644 --- a/plugins/outputs/registry.go +++ b/plugins/outputs/registry.go @@ -1,7 +1,7 @@ package outputs import ( - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) type Output interface { diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index afbde0051..c1b22ec46 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -6,8 +6,8 @@ import ( "os" "github.com/amir/raidman" - "github.com/influxdb/influxdb/client/v2" - "github.com/influxdb/telegraf/plugins/outputs" + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" ) type Riemann struct { diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 
31e9478b1..8b3f27ac0 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -3,7 +3,7 @@ package riemann import ( "testing" - "github.com/influxdb/telegraf/testutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/scripts/Vagrantfile b/scripts/Vagrantfile index 3c0199bdb..a04450d6d 100644 --- a/scripts/Vagrantfile +++ b/scripts/Vagrantfile @@ -7,7 +7,7 @@ VAGRANTFILE_API_VERSION = "2" Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "ubuntu/trusty64" - config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdb/telegraf", + config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdata/telegraf", type: "rsync", rsync__args: ["--verbose", "--archive", "--delete", "-z", "--safe-links"], rsync__exclude: ["./telegraf", ".vagrant/"] @@ -26,7 +26,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| gvm use go1.4.2 --default echo "export PATH=$PATH:$GOPATH/bin" >> "$HOME/.bashrc" echo 'export GOPATH=/home/vagrant/go' >> "$HOME/.bashrc" - cd "$HOME/go/src/github.com/influxdb/telegraf" &&\ + cd "$HOME/go/src/github.com/influxdata/telegraf" &&\ rm -rf Godeps/_workspace/pkg &&\ GOPATH="$HOME/go" make SHELL diff --git a/scripts/circle-test.sh b/scripts/circle-test.sh index 96319bf72..bbad51506 100755 --- a/scripts/circle-test.sh +++ b/scripts/circle-test.sh @@ -34,7 +34,7 @@ export GOPATH=$BUILD_DIR # Turning off GOGC speeds up build times export GOGC=off export PATH=$GOPATH/bin:$PATH -exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb +exit_if_fail mkdir -p $GOPATH/src/github.com/influxdata # Dump some test config to the log. 
echo "Test configuration" @@ -44,8 +44,8 @@ echo "\$GOPATH: $GOPATH" echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH" # Move the checked-out source to a better location -exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdb -exit_if_fail cd $GOPATH/src/github.com/influxdb/telegraf +exit_if_fail mv $HOME/telegraf $GOPATH/src/github.com/influxdata +exit_if_fail cd $GOPATH/src/github.com/influxdata/telegraf # Verify that go fmt has been run check_go_fmt diff --git a/scripts/telegraf.service b/scripts/telegraf.service index d92f3072c..6f4450402 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -1,6 +1,6 @@ [Unit] Description=The plugin-driven server agent for reporting metrics into InfluxDB -Documentation=https://github.com/influxdb/telegraf +Documentation=https://github.com/influxdata/telegraf After=network.target [Service] diff --git a/testutil/testutil.go b/testutil/testutil.go index 581220299..436b57361 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -6,7 +6,7 @@ import ( "os" "time" - "github.com/influxdb/influxdb/client/v2" + "github.com/influxdata/influxdb/client/v2" ) var localhost = "localhost" From 8192572e2361a0273ccd1d6142c503c352afdb12 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 12:06:58 -0700 Subject: [PATCH 096/103] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e86c74ada..eaf32638f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,12 +14,14 @@ - [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod! - [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. - [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. +- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics. 
### Bugfixes - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! - [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated. +- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats. ## v0.10.0 [2016-01-12] From 55c07f23b05a54a93c072dc0c7f863f2e9683007 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 12:30:52 -0700 Subject: [PATCH 097/103] Update contributing document --- CONTRIBUTING.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dfe6a77f4..f7e2ec86f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,8 +1,23 @@ +## Steps for Contributing: + +1. [Sign the CLA](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#sign-the-cla) +1. Write your input or output plugin (see below for details) +1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go` +1. If your plugin requires a new Go package, +[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency) + ## Sign the CLA Before we can merge a pull request, you will need to sign the CLA, which can be found [on our website](http://influxdb.com/community/cla.html) +## Adding a dependency + +Assuming you can already build the project: + +1. `go get github.com/sparrc/gdm` +1. `gdm save` + ## Input Plugins This section is for developers who want to create new collection inputs. 
From 6647cfc2286f3c07142eb7439b5f6ef6b6d841d4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 12:18:10 -0700 Subject: [PATCH 098/103] statsd: If parsing a value to int fails, try to float and cast to int fixes #556 --- plugins/inputs/statsd/statsd.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 1fac4aba0..6b7a427b7 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -337,10 +337,15 @@ func (s *Statsd) parseStatsdLine(line string) error { } m.floatvalue = v case "c", "s": + var v int64 v, err := strconv.ParseInt(pipesplit[0], 10, 64) if err != nil { - log.Printf("Error: parsing value to int64: %s\n", line) - return errors.New("Error Parsing statsd line") + v2, err2 := strconv.ParseFloat(pipesplit[0], 64) + if err2 != nil { + log.Printf("Error: parsing value to int64: %s\n", line) + return errors.New("Error Parsing statsd line") + } + v = int64(v2) } // If a sample rate is given with a counter, divide value by the rate if m.samplerate != 0 && m.mtype == "c" { From f24f5e98ddfc9b4f1557ff2a69844cc8826f70b1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 14:43:35 -0700 Subject: [PATCH 099/103] Remove go get ./... 
from the Makefile --- Godeps | 1 + Makefile | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/Godeps b/Godeps index c62d6feda..9f46fd79b 100644 --- a/Godeps +++ b/Godeps @@ -23,6 +23,7 @@ github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 github.com/hashicorp/raft b95f335efee1992886864389183ebda0c0a5d0f6 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee github.com/influxdata/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 +github.com/influxdb/influxdb 0e0f85a0c1fd1788ae4f9145531b02c539cfa5b5 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f diff --git a/Makefile b/Makefile index 2ae4d1257..3dedfb703 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,6 @@ dev: prepare # Get dependencies and use gdm to checkout changesets prepare: - go get ./... go get github.com/sparrc/gdm gdm restore From e0dc1ef5bd1c424aadbcb0c58029fe67df32cb33 Mon Sep 17 00:00:00 2001 From: Stephen Kwong Date: Mon, 18 Jan 2016 11:39:14 -0800 Subject: [PATCH 100/103] Add Cloudwatch output closes #553 --- CHANGELOG.md | 1 + README.md | 3 +- plugins/outputs/all/all.go | 1 + plugins/outputs/cloudwatch/README.md | 33 +++ plugins/outputs/cloudwatch/cloudwatch.go | 236 ++++++++++++++++++ plugins/outputs/cloudwatch/cloudwatch_test.go | 88 +++++++ 6 files changed, 361 insertions(+), 1 deletion(-) create mode 100644 plugins/outputs/cloudwatch/README.md create mode 100644 plugins/outputs/cloudwatch/cloudwatch.go create mode 100644 plugins/outputs/cloudwatch/cloudwatch_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index eaf32638f..2cc1308b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable. 
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. - [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics. +- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2! ### Bugfixes - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! diff --git a/README.md b/README.md index 5a997d363..89d26faad 100644 --- a/README.md +++ b/README.md @@ -189,10 +189,11 @@ want to add support for another service or third-party API. * influxdb * amon * amqp +* aws kinesis +* aws cloudwatch * datadog * graphite * kafka -* amazon kinesis * librato * mqtt * nsq diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index e05d53acc..ac8357c90 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -3,6 +3,7 @@ package all import ( _ "github.com/influxdata/telegraf/plugins/outputs/amon" _ "github.com/influxdata/telegraf/plugins/outputs/amqp" + _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md new file mode 100644 index 000000000..853d038c3 --- /dev/null +++ b/plugins/outputs/cloudwatch/README.md @@ -0,0 +1,33 @@ +## Amazon CloudWatch Output for Telegraf + +This plugin will send points to Amazon CloudWatch. + +## Amazon Authentication + +This plugin uses a credential chain for Authentication with the CloudWatch +API endpoint. In the following order the plugin will attempt to authenticate. +1. [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +2. 
[Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk) +3. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk) + +## Config + +For this output plugin to function correctly the following variables +must be configured. + +* region +* namespace + +### region + +The region is the Amazon region that you wish to connect to. +Examples include but are not limited to: +* us-west-1 +* us-west-2 +* us-east-1 +* ap-southeast-1 +* ap-southeast-2 + +### namespace + +The namespace used for AWS CloudWatch metrics. diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go new file mode 100644 index 000000000..1e20836da --- /dev/null +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -0,0 +1,236 @@ +package cloudwatch + +import ( + "log" + "math" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" + + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type CloudWatch struct { + Region string // AWS Region + Namespace string // CloudWatch Metrics Namespace + svc *cloudwatch.CloudWatch +} + +var sampleConfig = ` + # Amazon REGION + region = 'us-east-1' + + # Namespace for the CloudWatch MetricDatums + namespace = 'InfluxData/Telegraf' +` + +func (c *CloudWatch) SampleConfig() string { + return sampleConfig +} + +func (c *CloudWatch) Description() string { + return "Configuration for AWS CloudWatch output." 
+} + +func (c *CloudWatch) Connect() error { + Config := &aws.Config{ + Region: aws.String(c.Region), + Credentials: credentials.NewChainCredentials( + []credentials.Provider{ + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + }), + } + + svc := cloudwatch.New(session.New(Config)) + + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String(c.Namespace), + } + + _, err := svc.ListMetrics(params) // Try a read-only call to test connection. + + if err != nil { + log.Printf("cloudwatch: Error in ListMetrics API call : %+v \n", err.Error()) + } + + c.svc = svc + + return err +} + +func (c *CloudWatch) Close() error { + return nil +} + +func (c *CloudWatch) Write(points []*client.Point) error { + for _, pt := range points { + err := c.WriteSinglePoint(pt) + if err != nil { + return err + } + } + + return nil +} + +// Write data for a single point. A point can have many fields and one field +// is equal to one MetricDatum. There is a limit on how many MetricDatums a +// request can have so we process one Point at a time. +func (c *CloudWatch) WriteSinglePoint(point *client.Point) error { + datums := BuildMetricDatum(point) + + const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data points per call + + for _, partition := range PartitionDatums(maxDatumsPerCall, datums) { + err := c.WriteToCloudWatch(partition) + + if err != nil { + return err + } + } + + return nil +} + +func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { + params := &cloudwatch.PutMetricDataInput{ + MetricData: datums, + Namespace: aws.String(c.Namespace), + } + + _, err := c.svc.PutMetricData(params) + + if err != nil { + log.Printf("CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error()) + } + + return err +} + +// Partition the MetricDatums into smaller slices of a max size so that they are under the limit +// for the AWS API calls. 
+func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum { + + numberOfPartitions := len(datums) / size + if len(datums)%size != 0 { + numberOfPartitions += 1 + } + + partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) + + for i := 0; i < numberOfPartitions; i++ { + start := size * i + end := size * (i + 1) + if end > len(datums) { + end = len(datums) + } + + partitions[i] = datums[start:end] + } + + return partitions +} + +// Make a MetricDatum for each field in a Point. Only fields with values that can be +// converted to float64 are supported. Non-supported fields are skipped. +func BuildMetricDatum(point *client.Point) []*cloudwatch.MetricDatum { + datums := make([]*cloudwatch.MetricDatum, len(point.Fields())) + i := 0 + + var value float64 + + for k, v := range point.Fields() { + switch t := v.(type) { + case int: + value = float64(t) + case int32: + value = float64(t) + case int64: + value = float64(t) + case float64: + value = t + case bool: + if t { + value = 1 + } else { + value = 0 + } + case time.Time: + value = float64(t.Unix()) + default: + // Skip unsupported type. + datums = datums[:len(datums)-1] + continue + } + + datums[i] = &cloudwatch.MetricDatum{ + MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")), + Value: aws.Float64(value), + Dimensions: BuildDimensions(point.Tags()), + Timestamp: aws.Time(point.Time()), + } + + i += 1 + } + + return datums +} + +// Make a list of Dimensions by using a Point's tags. CloudWatch supports up to +// 10 dimensions per metric so we only keep up to the first 10 alphabetically. +// This always includes the "host" tag if it exists. +func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension { + + const MaxDimensions = 10 + dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(ptTags)), MaxDimensions))) + + i := 0 + + // This is pretty ugly but we always want to include the "host" tag if it exists. 
+ if host, ok := ptTags["host"]; ok { + dimensions[i] = &cloudwatch.Dimension{ + Name: aws.String("host"), + Value: aws.String(host), + } + i += 1 + } + + var keys []string + for k := range ptTags { + if k != "host" { + keys = append(keys, k) + } + } + sort.Strings(keys) + + for _, k := range keys { + if i >= MaxDimensions { + break + } + + dimensions[i] = &cloudwatch.Dimension{ + Name: aws.String(k), + Value: aws.String(ptTags[k]), + } + + i += 1 + } + + return dimensions +} + +func init() { + outputs.Add("cloudwatch", func() outputs.Output { + return &CloudWatch{} + }) +} diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go new file mode 100644 index 000000000..2041e14fd --- /dev/null +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -0,0 +1,88 @@ +package cloudwatch + +import ( + "sort" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" + + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +// Test that each tag becomes one dimension +func TestBuildDimensions(t *testing.T) { + const MaxDimensions = 10 + + assert := assert.New(t) + + testPoint := testutil.TestPoint(1) + dimensions := BuildDimensions(testPoint.Tags()) + + tagKeys := make([]string, len(testPoint.Tags())) + i := 0 + for k, _ := range testPoint.Tags() { + tagKeys[i] = k + i += 1 + } + + sort.Strings(tagKeys) + + if len(testPoint.Tags()) >= MaxDimensions { + assert.Equal(MaxDimensions, len(dimensions), "Number of dimensions should be less than MaxDimensions") + } else { + assert.Equal(len(testPoint.Tags()), len(dimensions), "Number of dimensions should be equal to number of tags") + } + + for i, key := range tagKeys { + if i >= 10 { + break + } + assert.Equal(key, *dimensions[i].Name, "Key should be equal") + assert.Equal(testPoint.Tags()[key], *dimensions[i].Value, "Value should be equal") + } +} + +// Test that 
points with valid values have a MetricDatum created, whereas non-valid do not. +// Skips "time.Time" type as something is converting the value to string. +func TestBuildMetricDatums(t *testing.T) { + assert := assert.New(t) + + validPoints := []*client.Point{ + testutil.TestPoint(1), + testutil.TestPoint(int32(1)), + testutil.TestPoint(int64(1)), + testutil.TestPoint(float64(1)), + testutil.TestPoint(true), + } + + for _, point := range validPoints { + datums := BuildMetricDatum(point) + assert.Equal(1, len(datums), "Valid type should create a Datum") + } + + nonValidPoint := testutil.TestPoint("Foo") + + assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum") +} + +func TestPartitionDatums(t *testing.T) { + + assert := assert.New(t) + + testDatum := cloudwatch.MetricDatum{ + MetricName: aws.String("Foo"), + Value: aws.Float64(1), + } + + oneDatum := []*cloudwatch.MetricDatum{&testDatum} + twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum} + threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum} + + assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) +} From 4d0dc8b7c8cd618df56d7e97e0acebbc0e49e7ad Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 20 Jan 2016 16:21:19 -0700 Subject: [PATCH 101/103] Refactor the docker plugin, use go-dockerclient throughout fixes #503 fixes #463 --- CHANGELOG.md | 9 + README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/docker/README.md | 148 +++++++++++++ plugins/inputs/docker/docker.go | 312 +++++++++++++++++++++++++++ plugins/inputs/docker/docker_test.go | 190 ++++++++++++++++ plugins/inputs/system/docker.go | 89 -------- plugins/inputs/system/docker_test.go | 119 ---------- plugins/inputs/system/mock_PS.go | 9 - 
plugins/inputs/system/ps.go | 67 +----- 10 files changed, 662 insertions(+), 283 deletions(-) create mode 100644 plugins/inputs/docker/README.md create mode 100644 plugins/inputs/docker/docker.go create mode 100644 plugins/inputs/docker/docker_test.go delete mode 100644 plugins/inputs/system/docker.go delete mode 100644 plugins/inputs/system/docker_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cc1308b9..fcf56d2db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ ## v0.10.1 [unreleased] +### Release Notes: + +- The docker plugin has been significantly overhauled to add more metrics +and allow for docker-machine (incl OSX) support. +[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md) +for the latest measurements, fields, and tags. There is also now support for +specifying a docker endpoint to get metrics from. + ### Features - [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! @@ -16,6 +24,7 @@ - [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. - [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics. - [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2! +- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul. ### Bugfixes - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! 
diff --git a/README.md b/README.md index 89d26faad..7207db8a9 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ Currently implemented sources: * apache * bcache * disque +* docker * elasticsearch * exec (generic JSON-emitting executable plugin) * haproxy diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index cb83dfdf9..cfd802438 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/disque" + _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/exec" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md new file mode 100644 index 000000000..fa662ca80 --- /dev/null +++ b/plugins/inputs/docker/README.md @@ -0,0 +1,148 @@ +# Docker Input Plugin + +The docker plugin uses the docker remote API to gather metrics on running +docker containers. You can read Docker's documentation for their remote API +[here](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.20/#get-container-stats-based-on-resource-usage) + +The docker plugin uses the excellent +[fsouza go-dockerclient](https://github.com/fsouza/go-dockerclient) library to +gather stats. 
Documentation for the library can be found +[here](https://godoc.org/github.com/fsouza/go-dockerclient) and documentation +for the stat structure can be found +[here](https://godoc.org/github.com/fsouza/go-dockerclient#Stats) + +### Configuration: + +``` +# Read metrics about docker containers +[[inputs.docker]] + # Docker Endpoint + # To use TCP, set endpoint = "tcp://[ip]:[port]" + # To use environment variables (ie, docker-machine), set endpoint = "ENV" + endpoint = "unix:///var/run/docker.sock" + # Only collect metrics for these containers, collect all if empty + container_names = [] +``` + +### Measurements & Fields: + +Every effort was made to preserve the names based on the JSON response from the +docker API. + +Note that the docker_cpu metric may appear multiple times per collection, based +on the availability of per-cpu stats on your system. + +- docker_mem + - total_pgmafault + - cache + - mapped_file + - total_inactive_file + - pgpgout + - rss + - total_mapped_file + - writeback + - unevictable + - pgpgin + - total_unevictable + - pgmajfault + - total_rss + - total_rss_huge + - total_writeback + - total_inactive_anon + - rss_huge + - hierarchical_memory_limit + - total_pgfault + - total_active_file + - active_anon + - total_active_anon + - total_pgpgout + - total_cache + - inactive_anon + - active_file + - pgfault + - inactive_file + - total_pgpgin + - max_usage + - usage + - failcnt + - limit +- docker_cpu + - throttling_periods + - throttling_throttled_periods + - throttling_throttled_time + - usage_in_kernelmode + - usage_in_usermode + - usage_system + - usage_total +- docker_net + - rx_dropped + - rx_bytes + - rx_errors + - tx_packets + - tx_dropped + - rx_packets + - tx_errors + - tx_bytes +- docker_blkio + - io_service_bytes_recursive_async + - io_service_bytes_recursive_read + - io_service_bytes_recursive_sync + - io_service_bytes_recursive_total + - io_service_bytes_recursive_write + - io_serviced_recursive_async + - io_serviced_recursive_read + 
- io_serviced_recursive_sync + - io_serviced_recursive_total + - io_serviced_recursive_write + +### Tags: + +- All stats have the following tags: + - cont_id (container ID) + - cont_image (container image) + - cont_name (container name) +- docker_cpu specific: + - cpu +- docker_net specific: + - network +- docker_blkio specific: + - device + +### Example Output: + +``` +% ./telegraf -config ~/ws/telegraf.conf -input-filter docker -test +* Plugin: docker, Collection 1 +> docker_mem,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka \ +active_anon=52568064i,active_file=6926336i,cache=12038144i,fail_count=0i,\ +hierarchical_memory_limit=9223372036854771712i,inactive_anon=52707328i,\ +inactive_file=5111808i,limit=1044578304i,mapped_file=10301440i,\ +max_usage=140656640i,pgfault=63762i,pgmajfault=2837i,pgpgin=73355i,\ +pgpgout=45736i,rss=105275392i,rss_huge=4194304i,total_active_anon=52568064i,\ +total_active_file=6926336i,total_cache=12038144i,total_inactive_anon=52707328i,\ +total_inactive_file=5111808i,total_mapped_file=10301440i,total_pgfault=63762i,\ +total_pgmafault=0i,total_pgpgin=73355i,total_pgpgout=45736i,\ +total_rss=105275392i,total_rss_huge=4194304i,total_unevictable=0i,\ +total_writeback=0i,unevictable=0i,usage=117440512i,writeback=0i 1453409536840126713 +> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,cpu=cpu-total \ +throttling_periods=0i,throttling_throttled_periods=0i,\ +throttling_throttled_time=0i,usage_in_kernelmode=440000000i,\ +usage_in_usermode=2290000000i,usage_system=84795360000000i,\ +usage_total=6628208865i 1453409536840126713 +> docker_cpu,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,cpu=cpu0 \ +usage_total=6628208865i 1453409536840126713 +> docker_net,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ 
+cont_image=spotify/kafka,cont_name=kafka,network=eth0 \ +rx_bytes=7468i,rx_dropped=0i,rx_errors=0i,rx_packets=94i,tx_bytes=946i,\ +tx_dropped=0i,tx_errors=0i,tx_packets=13i 1453409536840126713 +> docker_blkio,cont_id=5705ba8ed8fb47527410653d60a8bb2f3af5e62372297c419022a3cc6d45d848,\ +cont_image=spotify/kafka,cont_name=kafka,device=8:0 \ +io_service_bytes_recursive_async=80216064i,io_service_bytes_recursive_read=79925248i,\ +io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888i,\ +io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\ +io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\ +io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713 +``` diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go new file mode 100644 index 000000000..70fcaa19a --- /dev/null +++ b/plugins/inputs/docker/docker.go @@ -0,0 +1,312 @@ +package system + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/fsouza/go-dockerclient" +) + +type Docker struct { + Endpoint string + ContainerNames []string + + client *docker.Client +} + +var sampleConfig = ` + # Docker Endpoint + # To use TCP, set endpoint = "tcp://[ip]:[port]" + # To use environment variables (ie, docker-machine), set endpoint = "ENV" + endpoint = "unix:///var/run/docker.sock" + # Only collect metrics for these containers, collect all if empty + container_names = [] +` + +func (d *Docker) Description() string { + return "Read metrics about docker containers" +} + +func (d *Docker) SampleConfig() string { return sampleConfig } + +func (d *Docker) Gather(acc inputs.Accumulator) error { + if d.client == nil { + var c *docker.Client + var err error + if d.Endpoint == "ENV" { + c, err = docker.NewClientFromEnv() + if err != nil { + return err + } + } else if d.Endpoint == "" { + c, err = docker.NewClient("unix:///var/run/docker.sock") + if err != nil 
{ + return err + } + } else { + c, err = docker.NewClient(d.Endpoint) + if err != nil { + return err + } + } + d.client = c + } + + opts := docker.ListContainersOptions{} + containers, err := d.client.ListContainers(opts) + if err != nil { + return err + } + + var wg sync.WaitGroup + wg.Add(len(containers)) + for _, container := range containers { + go func(c docker.APIContainers) { + defer wg.Done() + err := d.gatherContainer(c, acc) + if err != nil { + fmt.Println(err.Error()) + } + }(container) + } + wg.Wait() + + return nil +} + +func (d *Docker) gatherContainer( + container docker.APIContainers, + acc inputs.Accumulator, +) error { + // Parse container name + cname := "unknown" + if len(container.Names) > 0 { + // Not sure what to do with other names, just take the first. + cname = strings.TrimPrefix(container.Names[0], "/") + } + + tags := map[string]string{ + "cont_id": container.ID, + "cont_name": cname, + "cont_image": container.Image, + } + if len(d.ContainerNames) > 0 { + if !sliceContains(cname, d.ContainerNames) { + return nil + } + } + + statChan := make(chan *docker.Stats) + done := make(chan bool) + statOpts := docker.StatsOptions{ + Stream: false, + ID: container.ID, + Stats: statChan, + Done: done, + Timeout: time.Duration(time.Second * 5), + } + + var err error + go func() { + err = d.client.Stats(statOpts) + }() + + stat := <-statChan + if err != nil { + return err + } + + // Add labels to tags + for k, v := range container.Labels { + tags[k] = v + } + + gatherContainerStats(stat, acc, tags) + + return nil +} + +func gatherContainerStats( + stat *docker.Stats, + acc inputs.Accumulator, + tags map[string]string, +) { + now := stat.Read + + memfields := map[string]interface{}{ + "max_usage": stat.MemoryStats.MaxUsage, + "usage": stat.MemoryStats.Usage, + "fail_count": stat.MemoryStats.Failcnt, + "limit": stat.MemoryStats.Limit, + "total_pgmafault": stat.MemoryStats.Stats.TotalPgmafault, + "cache": stat.MemoryStats.Stats.Cache, + "mapped_file": 
stat.MemoryStats.Stats.MappedFile, + "total_inactive_file": stat.MemoryStats.Stats.TotalInactiveFile, + "pgpgout": stat.MemoryStats.Stats.Pgpgout, + "rss": stat.MemoryStats.Stats.Rss, + "total_mapped_file": stat.MemoryStats.Stats.TotalMappedFile, + "writeback": stat.MemoryStats.Stats.Writeback, + "unevictable": stat.MemoryStats.Stats.Unevictable, + "pgpgin": stat.MemoryStats.Stats.Pgpgin, + "total_unevictable": stat.MemoryStats.Stats.TotalUnevictable, + "pgmajfault": stat.MemoryStats.Stats.Pgmajfault, + "total_rss": stat.MemoryStats.Stats.TotalRss, + "total_rss_huge": stat.MemoryStats.Stats.TotalRssHuge, + "total_writeback": stat.MemoryStats.Stats.TotalWriteback, + "total_inactive_anon": stat.MemoryStats.Stats.TotalInactiveAnon, + "rss_huge": stat.MemoryStats.Stats.RssHuge, + "hierarchical_memory_limit": stat.MemoryStats.Stats.HierarchicalMemoryLimit, + "total_pgfault": stat.MemoryStats.Stats.TotalPgfault, + "total_active_file": stat.MemoryStats.Stats.TotalActiveFile, + "active_anon": stat.MemoryStats.Stats.ActiveAnon, + "total_active_anon": stat.MemoryStats.Stats.TotalActiveAnon, + "total_pgpgout": stat.MemoryStats.Stats.TotalPgpgout, + "total_cache": stat.MemoryStats.Stats.TotalCache, + "inactive_anon": stat.MemoryStats.Stats.InactiveAnon, + "active_file": stat.MemoryStats.Stats.ActiveFile, + "pgfault": stat.MemoryStats.Stats.Pgfault, + "inactive_file": stat.MemoryStats.Stats.InactiveFile, + "total_pgpgin": stat.MemoryStats.Stats.TotalPgpgin, + } + acc.AddFields("docker_mem", memfields, tags, now) + + cpufields := map[string]interface{}{ + "usage_total": stat.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stat.CPUStats.SystemCPUUsage, + "throttling_periods": stat.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": 
stat.CPUStats.ThrottlingData.ThrottledTime, + } + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("docker_cpu", cpufields, cputags, now) + + for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + acc.AddFields("docker_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now) + } + + for network, netstats := range stat.Networks { + netfields := map[string]interface{}{ + "rx_dropped": netstats.RxDropped, + "rx_bytes": netstats.RxBytes, + "rx_errors": netstats.RxErrors, + "tx_packets": netstats.TxPackets, + "tx_dropped": netstats.TxDropped, + "rx_packets": netstats.RxPackets, + "tx_errors": netstats.TxErrors, + "tx_bytes": netstats.TxBytes, + } + // Create a new network tag dictionary for the "network" tag + nettags := copyTags(tags) + nettags["network"] = network + acc.AddFields("docker_net", netfields, nettags, now) + } + + gatherBlockIOMetrics(stat, acc, tags, now) +} + +func gatherBlockIOMetrics( + stat *docker.Stats, + acc inputs.Accumulator, + tags map[string]string, + now time.Time, +) { + blkioStats := stat.BlkioStats + // Make a map of devices to their block io stats + deviceStatMap := make(map[string]map[string]interface{}) + + for _, metric := range blkioStats.IOServiceBytesRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOServicedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, 
metric := range blkioStats.IOQueueRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOServiceTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOWaitTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOMergedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IOTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.SectorsRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("sectors_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for device, fields := range deviceStatMap { + iotags := copyTags(tags) + iotags["device"] = device + acc.AddFields("docker_blkio", fields, iotags, now) + } +} + +func copyTags(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} + +func sliceContains(in string, sl []string) bool { + for _, str := range sl { + if str == in { + return true + } + } + return false +} + +func init() { + inputs.Add("docker", func() inputs.Input { + return &Docker{} + }) 
+} diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go new file mode 100644 index 000000000..9b85d1029 --- /dev/null +++ b/plugins/inputs/docker/docker_test.go @@ -0,0 +1,190 @@ +package system + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + + "github.com/fsouza/go-dockerclient" +) + +func TestDockerGatherContainerStats(t *testing.T) { + var acc testutil.Accumulator + stats := testStats() + + tags := map[string]string{ + "cont_id": "foobarbaz", + "cont_name": "redis", + "cont_image": "redis/image", + } + gatherContainerStats(stats, &acc, tags) + + // test docker_net measurement + netfields := map[string]interface{}{ + "rx_dropped": uint64(1), + "rx_bytes": uint64(2), + "rx_errors": uint64(3), + "tx_packets": uint64(4), + "tx_dropped": uint64(1), + "rx_packets": uint64(2), + "tx_errors": uint64(3), + "tx_bytes": uint64(4), + } + nettags := copyTags(tags) + nettags["network"] = "eth0" + acc.AssertContainsTaggedFields(t, "docker_net", netfields, nettags) + + // test docker_blkio measurement + blkiotags := copyTags(tags) + blkiotags["device"] = "6:0" + blkiofields := map[string]interface{}{ + "io_service_bytes_recursive_read": uint64(100), + "io_serviced_recursive_write": uint64(101), + } + acc.AssertContainsTaggedFields(t, "docker_blkio", blkiofields, blkiotags) + + // test docker_mem measurement + memfields := map[string]interface{}{ + "max_usage": uint64(1001), + "usage": uint64(1111), + "fail_count": uint64(1), + "limit": uint64(20), + "total_pgmafault": uint64(0), + "cache": uint64(0), + "mapped_file": uint64(0), + "total_inactive_file": uint64(0), + "pgpgout": uint64(0), + "rss": uint64(0), + "total_mapped_file": uint64(0), + "writeback": uint64(0), + "unevictable": uint64(0), + "pgpgin": uint64(0), + "total_unevictable": uint64(0), + "pgmajfault": uint64(0), + "total_rss": uint64(44), + "total_rss_huge": uint64(444), + "total_writeback": uint64(55), + "total_inactive_anon": uint64(0), + 
"rss_huge": uint64(0), + "hierarchical_memory_limit": uint64(0), + "total_pgfault": uint64(0), + "total_active_file": uint64(0), + "active_anon": uint64(0), + "total_active_anon": uint64(0), + "total_pgpgout": uint64(0), + "total_cache": uint64(0), + "inactive_anon": uint64(0), + "active_file": uint64(1), + "pgfault": uint64(2), + "inactive_file": uint64(3), + "total_pgpgin": uint64(4), + } + acc.AssertContainsTaggedFields(t, "docker_mem", memfields, tags) + + // test docker_cpu measurement + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + cpufields := map[string]interface{}{ + "usage_total": uint64(500), + "usage_in_usermode": uint64(100), + "usage_in_kernelmode": uint64(200), + "usage_system": uint64(100), + "throttling_periods": uint64(1), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpufields, cputags) + + cputags["cpu"] = "cpu0" + cpu0fields := map[string]interface{}{ + "usage_total": uint64(1), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpu0fields, cputags) + + cputags["cpu"] = "cpu1" + cpu1fields := map[string]interface{}{ + "usage_total": uint64(1002), + } + acc.AssertContainsTaggedFields(t, "docker_cpu", cpu1fields, cputags) +} + +func testStats() *docker.Stats { + stats := &docker.Stats{ + Read: time.Now(), + Networks: make(map[string]docker.NetworkStats), + } + + stats.CPUStats.CPUUsage.PercpuUsage = []uint64{1, 1002} + stats.CPUStats.CPUUsage.UsageInUsermode = 100 + stats.CPUStats.CPUUsage.TotalUsage = 500 + stats.CPUStats.CPUUsage.UsageInKernelmode = 200 + stats.CPUStats.SystemCPUUsage = 100 + stats.CPUStats.ThrottlingData.Periods = 1 + + stats.MemoryStats.Stats.TotalPgmafault = 0 + stats.MemoryStats.Stats.Cache = 0 + stats.MemoryStats.Stats.MappedFile = 0 + stats.MemoryStats.Stats.TotalInactiveFile = 0 + stats.MemoryStats.Stats.Pgpgout = 0 + stats.MemoryStats.Stats.Rss = 0 + stats.MemoryStats.Stats.TotalMappedFile = 0 + 
stats.MemoryStats.Stats.Writeback = 0 + stats.MemoryStats.Stats.Unevictable = 0 + stats.MemoryStats.Stats.Pgpgin = 0 + stats.MemoryStats.Stats.TotalUnevictable = 0 + stats.MemoryStats.Stats.Pgmajfault = 0 + stats.MemoryStats.Stats.TotalRss = 44 + stats.MemoryStats.Stats.TotalRssHuge = 444 + stats.MemoryStats.Stats.TotalWriteback = 55 + stats.MemoryStats.Stats.TotalInactiveAnon = 0 + stats.MemoryStats.Stats.RssHuge = 0 + stats.MemoryStats.Stats.HierarchicalMemoryLimit = 0 + stats.MemoryStats.Stats.TotalPgfault = 0 + stats.MemoryStats.Stats.TotalActiveFile = 0 + stats.MemoryStats.Stats.ActiveAnon = 0 + stats.MemoryStats.Stats.TotalActiveAnon = 0 + stats.MemoryStats.Stats.TotalPgpgout = 0 + stats.MemoryStats.Stats.TotalCache = 0 + stats.MemoryStats.Stats.InactiveAnon = 0 + stats.MemoryStats.Stats.ActiveFile = 1 + stats.MemoryStats.Stats.Pgfault = 2 + stats.MemoryStats.Stats.InactiveFile = 3 + stats.MemoryStats.Stats.TotalPgpgin = 4 + + stats.MemoryStats.MaxUsage = 1001 + stats.MemoryStats.Usage = 1111 + stats.MemoryStats.Failcnt = 1 + stats.MemoryStats.Limit = 20 + + stats.Networks["eth0"] = docker.NetworkStats{ + RxDropped: 1, + RxBytes: 2, + RxErrors: 3, + TxPackets: 4, + TxDropped: 1, + RxPackets: 2, + TxErrors: 3, + TxBytes: 4, + } + + sbr := docker.BlkioStatsEntry{ + Major: 6, + Minor: 0, + Op: "read", + Value: 100, + } + sr := docker.BlkioStatsEntry{ + Major: 6, + Minor: 0, + Op: "write", + Value: 101, + } + + stats.BlkioStats.IOServiceBytesRecursive = append( + stats.BlkioStats.IOServiceBytesRecursive, sbr) + stats.BlkioStats.IOServicedRecursive = append( + stats.BlkioStats.IOServicedRecursive, sr) + + return stats +} diff --git a/plugins/inputs/system/docker.go b/plugins/inputs/system/docker.go deleted file mode 100644 index 4f60c771e..000000000 --- a/plugins/inputs/system/docker.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package system - -import ( - "fmt" - - "github.com/influxdata/telegraf/plugins/inputs" -) - -type DockerStats struct { - ps PS -} 
- -func (_ *DockerStats) Description() string { - return "Read metrics about docker containers" -} - -func (_ *DockerStats) SampleConfig() string { return "" } - -func (s *DockerStats) Gather(acc inputs.Accumulator) error { - containers, err := s.ps.DockerStat() - if err != nil { - return fmt.Errorf("error getting docker info: %s", err) - } - - for _, cont := range containers { - tags := map[string]string{ - "id": cont.Id, - "name": cont.Name, - "command": cont.Command, - } - for k, v := range cont.Labels { - tags[k] = v - } - - cts := cont.CPU - - fields := map[string]interface{}{ - "user": cts.User, - "system": cts.System, - "idle": cts.Idle, - "nice": cts.Nice, - "iowait": cts.Iowait, - "irq": cts.Irq, - "softirq": cts.Softirq, - "steal": cts.Steal, - "guest": cts.Guest, - "guest_nice": cts.GuestNice, - - "cache": cont.Mem.Cache, - "rss": cont.Mem.RSS, - "rss_huge": cont.Mem.RSSHuge, - "mapped_file": cont.Mem.MappedFile, - "swap_in": cont.Mem.Pgpgin, - "swap_out": cont.Mem.Pgpgout, - "page_fault": cont.Mem.Pgfault, - "page_major_fault": cont.Mem.Pgmajfault, - "inactive_anon": cont.Mem.InactiveAnon, - "active_anon": cont.Mem.ActiveAnon, - "inactive_file": cont.Mem.InactiveFile, - "active_file": cont.Mem.ActiveFile, - "unevictable": cont.Mem.Unevictable, - "memory_limit": cont.Mem.HierarchicalMemoryLimit, - "total_cache": cont.Mem.TotalCache, - "total_rss": cont.Mem.TotalRSS, - "total_rss_huge": cont.Mem.TotalRSSHuge, - "total_mapped_file": cont.Mem.TotalMappedFile, - "total_swap_in": cont.Mem.TotalPgpgIn, - "total_swap_out": cont.Mem.TotalPgpgOut, - "total_page_fault": cont.Mem.TotalPgFault, - "total_page_major_fault": cont.Mem.TotalPgMajFault, - "total_inactive_anon": cont.Mem.TotalInactiveAnon, - "total_active_anon": cont.Mem.TotalActiveAnon, - "total_inactive_file": cont.Mem.TotalInactiveFile, - "total_active_file": cont.Mem.TotalActiveFile, - "total_unevictable": cont.Mem.TotalUnevictable, - } - acc.AddFields("docker", fields, tags) - } - - return nil -} - 
-func init() { - inputs.Add("docker", func() inputs.Input { - return &DockerStats{ps: &systemPS{}} - }) -} diff --git a/plugins/inputs/system/docker_test.go b/plugins/inputs/system/docker_test.go deleted file mode 100644 index 6f680d8be..000000000 --- a/plugins/inputs/system/docker_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// +build linux - -package system - -import ( - "testing" - - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/docker" - - "github.com/stretchr/testify/require" -) - -func TestDockerStats_GenerateStats(t *testing.T) { - var mps MockPS - var acc testutil.Accumulator - - ds := &DockerContainerStat{ - Name: "blah", - CPU: &cpu.CPUTimesStat{ - CPU: "all", - User: 3.1, - System: 8.2, - Idle: 80.1, - Nice: 1.3, - Iowait: 0.2, - Irq: 0.1, - Softirq: 0.11, - Steal: 0.0001, - Guest: 8.1, - GuestNice: 0.324, - }, - Mem: &docker.CgroupMemStat{ - ContainerID: "blah", - Cache: 1, - RSS: 2, - RSSHuge: 3, - MappedFile: 4, - Pgpgin: 5, - Pgpgout: 6, - Pgfault: 7, - Pgmajfault: 8, - InactiveAnon: 9, - ActiveAnon: 10, - InactiveFile: 11, - ActiveFile: 12, - Unevictable: 13, - HierarchicalMemoryLimit: 14, - TotalCache: 15, - TotalRSS: 16, - TotalRSSHuge: 17, - TotalMappedFile: 18, - TotalPgpgIn: 19, - TotalPgpgOut: 20, - TotalPgFault: 21, - TotalPgMajFault: 22, - TotalInactiveAnon: 23, - TotalActiveAnon: 24, - TotalInactiveFile: 25, - TotalActiveFile: 26, - TotalUnevictable: 27, - }, - } - - mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil) - - err := (&DockerStats{&mps}).Gather(&acc) - require.NoError(t, err) - - dockertags := map[string]string{ - "name": "blah", - "id": "", - "command": "", - } - - fields := map[string]interface{}{ - "user": 3.1, - "system": 8.2, - "idle": 80.1, - "nice": 1.3, - "iowait": 0.2, - "irq": 0.1, - "softirq": 0.11, - "steal": 0.0001, - "guest": 8.1, - "guest_nice": 0.324, - - "cache": uint64(1), - "rss": uint64(2), - "rss_huge": uint64(3), - "mapped_file": uint64(4), - 
"swap_in": uint64(5), - "swap_out": uint64(6), - "page_fault": uint64(7), - "page_major_fault": uint64(8), - "inactive_anon": uint64(9), - "active_anon": uint64(10), - "inactive_file": uint64(11), - "active_file": uint64(12), - "unevictable": uint64(13), - "memory_limit": uint64(14), - "total_cache": uint64(15), - "total_rss": uint64(16), - "total_rss_huge": uint64(17), - "total_mapped_file": uint64(18), - "total_swap_in": uint64(19), - "total_swap_out": uint64(20), - "total_page_fault": uint64(21), - "total_page_major_fault": uint64(22), - "total_inactive_anon": uint64(23), - "total_active_anon": uint64(24), - "total_inactive_file": uint64(25), - "total_active_file": uint64(26), - "total_unevictable": uint64(27), - } - - acc.AssertContainsTaggedFields(t, "docker", fields, dockertags) -} diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index 661adb2ac..6e9a5f93e 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -87,15 +87,6 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) { return r0, r1 } -func (m *MockPS) DockerStat() ([]*DockerContainerStat, error) { - ret := m.Called() - - r0 := ret.Get(0).([]*DockerContainerStat) - r1 := ret.Error(1) - - return r0, r1 -} - func (m *MockPS) NetConnections() ([]net.NetConnectionStat, error) { ret := m.Called() diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 17971c5f5..98c9b8b31 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -1,30 +1,17 @@ package system import ( - gonet "net" "os" - "strings" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - dc "github.com/fsouza/go-dockerclient" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/docker" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" ) -type DockerContainerStat struct { - Id string - Name string - Command string - Labels 
map[string]string - CPU *cpu.CPUTimesStat - Mem *docker.CgroupMemStat -} - type PS interface { CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) DiskUsage(mountPointFilter []string) ([]*disk.DiskUsageStat, error) @@ -33,7 +20,6 @@ type PS interface { DiskIO() (map[string]disk.DiskIOCountersStat, error) VMStat() (*mem.VirtualMemoryStat, error) SwapStat() (*mem.SwapMemoryStat, error) - DockerStat() ([]*DockerContainerStat, error) NetConnections() ([]net.NetConnectionStat, error) } @@ -44,9 +30,7 @@ func add(acc inputs.Accumulator, } } -type systemPS struct { - dockerClient *dc.Client -} +type systemPS struct{} func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { var cpuTimes []cpu.CPUTimesStat @@ -133,52 +117,3 @@ func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) { func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } - -func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) { - if s.dockerClient == nil { - c, err := dc.NewClient("unix:///var/run/docker.sock") - if err != nil { - return nil, err - } - - s.dockerClient = c - } - - opts := dc.ListContainersOptions{} - - containers, err := s.dockerClient.ListContainers(opts) - if err != nil { - if _, ok := err.(*gonet.OpError); ok { - return nil, nil - } - - return nil, err - } - - var stats []*DockerContainerStat - - for _, container := range containers { - ctu, err := docker.CgroupCPUDocker(container.ID) - if err != nil { - return nil, err - } - - mem, err := docker.CgroupMemDocker(container.ID) - if err != nil { - return nil, err - } - - name := strings.Join(container.Names, " ") - - stats = append(stats, &DockerContainerStat{ - Id: container.ID, - Name: name, - Command: container.Command, - Labels: container.Labels, - CPU: ctu, - Mem: mem, - }) - } - - return stats, nil -} From e910a03af46aede95a7ac4b2b1a8bc6ff55fdb32 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 21 Jan 2016 16:44:35 -0700 Subject: [PATCH 102/103] 
Changelog update --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcf56d2db..28b47fe20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ specifying a docker endpoint to get metrics from. - [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering. - [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics. - [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2! +- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration. - [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul. ### Bugfixes @@ -32,6 +33,7 @@ specifying a docker endpoint to get metrics from. - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! - [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated. - [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats. 
+- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux ## v0.10.0 [2016-01-12] From f2ab5f61f5105fc72f09a84b5968c97aba6f6a0f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 21 Jan 2016 16:52:49 -0700 Subject: [PATCH 103/103] Gather elasticsearch nodes in goroutines, handle errors fixes #464 --- plugins/inputs/elasticsearch/elasticsearch.go | 49 ++++++++++++++----- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 9b59537c0..304e0e3d7 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -2,8 +2,11 @@ package elasticsearch import ( "encoding/json" + "errors" "fmt" "net/http" + "strings" + "sync" "time" "github.com/influxdata/telegraf/internal" @@ -93,21 +96,41 @@ func (e *Elasticsearch) Description() string { // Gather reads the stats from Elasticsearch and writes it to the // Accumulator. 
func (e *Elasticsearch) Gather(acc inputs.Accumulator) error { + errChan := make(chan error, len(e.Servers)) + var wg sync.WaitGroup + wg.Add(len(e.Servers)) + for _, serv := range e.Servers { - var url string - if e.Local { - url = serv + statsPathLocal - } else { - url = serv + statsPath - } - if err := e.gatherNodeStats(url, acc); err != nil { - return err - } - if e.ClusterHealth { - e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", serv), acc) - } + go func(s string, acc inputs.Accumulator) { + defer wg.Done() + var url string + if e.Local { + url = s + statsPathLocal + } else { + url = s + statsPath + } + if err := e.gatherNodeStats(url, acc); err != nil { + errChan <- err + return + } + if e.ClusterHealth { + e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc) + } + }(serv, acc) } - return nil + + wg.Wait() + close(errChan) + // Get all errors and return them as one giant error + errStrings := []string{} + for err := range errChan { + errStrings = append(errStrings, err.Error()) + } + + if len(errStrings) == 0 { + return nil + } + return errors.New(strings.Join(errStrings, "\n")) } func (e *Elasticsearch) gatherNodeStats(url string, acc inputs.Accumulator) error {