Merge 875c8c2290 into bdac9b7241
commit edfbf84f0c

CHANGELOG.md (22 changes)
@@ -1,3 +1,25 @@
## v0.3.0 [unreleased]

### Release Notes
- **breaking change** the `io` plugin has been renamed `diskio`.
- **breaking change** Plugin measurements are now aggregated into a single
measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- `twemproxy` plugin: `prefix` option removed.
- `procstat` cpu measurements are now prepended with `cpu_time_` instead of
only `cpu_`.
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).

### Features
- Plugin measurements are aggregated into a single measurement.
- Added ability to specify per-plugin tags.
- Added ability to specify a per-plugin measurement suffix and prefix
(`name_prefix` and `name_suffix`).
- Added ability to override the base plugin name (`name_override`).

### Bugfixes

## v0.2.5 [unreleased]

### Features

CONFIGURATION.md (new file)

@@ -0,0 +1,177 @@
# Telegraf Configuration

## Plugin Configuration

There are some configuration options that are available per plugin:

* **name_override**: Override the base name of the measurement
(default is the name of the plugin).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific plugin's measurements.

### Plugin Filters

There are also filters that can be configured per plugin:

* **pass**: An array of strings used to filter metrics generated by the
current plugin. Each string in the array is tested as a glob match against
field names, and if it matches, the field is emitted.
* **drop**: The inverse of pass; if a field name matches, it is not emitted.
* **tagpass**: A map of tag names to arrays of strings, used to filter
measurements by the current plugin. Each string in the array is tested as a glob
match against the tag's value, and if it matches, the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular plugin should be run less or more often,
you can configure that here (see the sketch below).
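
As a minimal illustration of a per-plugin interval override (the `30s` value
here is hypothetical, not part of this change):

```toml
[[plugins.cpu]]
    percpu = true
    totalcpu = false
    # gather CPU metrics every 30s rather than at the global interval
    interval = "30s"
```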

### Plugin Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[tags]
    dc = "denver-1"

[agent]
    interval = "10s"

# OUTPUTS
[outputs]
[[outputs.influxdb]]
    url = "http://192.168.59.103:8086" # required.
    database = "telegraf" # required.
    precision = "s"

# PLUGINS
[plugins]
[[plugins.cpu]]
    percpu = true
    totalcpu = false
    # filter all fields beginning with 'time_'
    drop = ["time_*"]
```

### Plugin Config: tagpass and tagdrop

```toml
[plugins]
[[plugins.cpu]]
    percpu = true
    totalcpu = false
    drop = ["cpu_time"]
    # Don't collect CPU data for cpu6 & cpu7
    [plugins.cpu.tagdrop]
        cpu = [ "cpu6", "cpu7" ]

[[plugins.disk]]
    [plugins.disk.tagpass]
        # tagpass conditions are OR, not AND.
        # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
        # then the metric passes
        fstype = [ "ext4", "xfs" ]
        # Globs can also be used on the tag values
        path = [ "/opt", "/home*" ]
```

### Plugin Config: pass and drop

```toml
# Drop all metrics for guest & steal CPU usage
[[plugins.cpu]]
    percpu = false
    totalcpu = true
    drop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[plugins.disk]]
    pass = ["inodes*"]
```

### Plugin Config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`:

```toml
[[plugins.cpu]]
    name_suffix = "_total"
    percpu = false
    totalcpu = true
```

This will emit measurements with the name `foobar`:

```toml
[[plugins.cpu]]
    name_override = "foobar"
    percpu = false
    totalcpu = true
```

### Plugin Config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`:

```toml
[[plugins.cpu]]
    percpu = false
    totalcpu = true
    [plugins.cpu.tags]
        tag1 = "foo"
        tag2 = "bar"
```

### Multiple plugins of the same type

Additional plugins (or outputs) of the same type can be specified;
just define more instances in the config file:

```toml
[[plugins.cpu]]
    percpu = false
    totalcpu = true

[[plugins.cpu]]
    percpu = true
    totalcpu = false
    drop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as plugins
(pass, drop, tagpass, tagdrop), added in 0.2.4:

```toml
[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf"
    precision = "s"
    # Drop all measurements that start with "aerospike"
    drop = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-aerospike-data"
    precision = "s"
    # Only accept aerospike data:
    pass = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-cpu0-data"
    precision = "s"
    # Only store measurements where the tag "cpu" matches the value "cpu0"
    [outputs.influxdb.tagpass]
        cpu = ["cpu0"]
```

README.md (133 changes)
@@ -116,99 +116,10 @@ unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as
InfluxDB.

## Plugin Options
## Configuration

There are 5 configuration options that are configurable per plugin:

* **pass**: An array of strings that is used to filter metrics generated by the
current plugin. Each string in the array is tested as a glob match against metric names
and if it matches, the metric is emitted.
* **drop**: The inverse of pass, if a metric name matches, it is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current plugin. Each string in the array is tested as a glob
match against the tag name, and if it matches the metric is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted.
This is tested on metrics that have passed the tagpass test.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular plugin should be run less or more often,
you can configure that here.

### Plugin Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
measurements which begin with `cpu_time`.

```toml
[tags]
    dc = "denver-1"

[agent]
    interval = "10s"

# OUTPUTS
[outputs]
[[outputs.influxdb]]
    url = "http://192.168.59.103:8086" # required.
    database = "telegraf" # required.
    precision = "s"

# PLUGINS
[plugins]
[[plugins.cpu]]
    percpu = true
    totalcpu = false
    drop = ["cpu_time*"]
```

Below is how to configure `tagpass` and `tagdrop` parameters

```toml
[plugins]
[[plugins.cpu]]
    percpu = true
    totalcpu = false
    drop = ["cpu_time"]
    # Don't collect CPU data for cpu6 & cpu7
    [plugins.cpu.tagdrop]
        cpu = [ "cpu6", "cpu7" ]

[[plugins.disk]]
    [plugins.disk.tagpass]
        # tagpass conditions are OR, not AND.
        # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
        # then the metric passes
        fstype = [ "ext4", "xfs" ]
        # Globs can also be used on the tag values
        path = [ "/opt", "/home*" ]
```

Below is how to configure `pass` and `drop` parameters

```toml
# Drop all metrics for guest CPU usage
[[plugins.cpu]]
    drop = [ "cpu_usage_guest" ]

# Only store inode related metrics for disks
[[plugins.disk]]
    pass = [ "disk_inodes*" ]
```

Additional plugins (or outputs) of the same type can be specified,
just define more instances in the config file:

```toml
[[plugins.cpu]]
    percpu = false
    totalcpu = true

[[plugins.cpu]]
    percpu = true
    totalcpu = false
    drop = ["cpu_time*"]
```
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
configuration options.

## Supported Plugins

@@ -226,7 +137,7 @@ Telegraf currently has support for collecting metrics from:
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia (remote JMX with JSON over HTTP)
* jolokia
* leofs
* lustre2
* mailchimp

@@ -249,10 +160,10 @@ Telegraf currently has support for collecting metrics from:
* system
    * cpu
    * mem
    * io
    * net
    * netstat
    * disk
    * diskio
    * swap

## Supported Service Plugins

@@ -265,40 +176,6 @@ Telegraf can collect metrics via the following services:
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

## Output options

Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as plugins
(pass, drop, tagpass, tagdrop), added in 0.2.4

```toml
[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf"
    precision = "s"
    # Drop all measurements that start with "aerospike"
    drop = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-aerospike-data"
    precision = "s"
    # Only accept aerospike data:
    pass = ["aerospike*"]

[[outputs.influxdb]]
    urls = [ "http://localhost:8086" ]
    database = "telegraf-cpu0-data"
    precision = "s"
    # Only store measurements where the tag "cpu" matches the value "cpu0"
    [outputs.influxdb.tagpass]
        cpu = ["cpu0"]
```

## Supported Outputs

* influxdb

@@ -69,30 +69,72 @@ func (ac *accumulator) AddFields(
    tags map[string]string,
    t ...time.Time,
) {
    // Validate uint64 and float64 fields
    if !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
        return
    }

    // Override measurement name if set
    if len(ac.pluginConfig.NameOverride) != 0 {
        measurement = ac.pluginConfig.NameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(ac.pluginConfig.MeasurementPrefix) != 0 {
        measurement = ac.pluginConfig.MeasurementPrefix + measurement
    }
    if len(ac.pluginConfig.MeasurementSuffix) != 0 {
        measurement = measurement + ac.pluginConfig.MeasurementSuffix
    }

    if tags == nil {
        tags = make(map[string]string)
    }
    // Apply plugin-wide tags if set
    for k, v := range ac.pluginConfig.Tags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    result := make(map[string]interface{})
    for k, v := range fields {
        // Filter out any filtered fields
        if ac.pluginConfig != nil {
            if !ac.pluginConfig.Filter.ShouldPass(k) {
                continue
            }
        }
        result[k] = v

        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                fields[k] = int64(val)
                result[k] = int64(val)
            } else {
                fields[k] = int64(9223372036854775807)
                result[k] = int64(9223372036854775807)
            }
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] has a NaN or Inf field, skipping",
                        measurement)
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                return
                continue
            }
        }
    }

    if tags == nil {
        tags = make(map[string]string)
    fields = nil
    if len(result) == 0 {
        return
    }

    var timestamp time.Time

@@ -106,19 +148,7 @@ func (ac *accumulator) AddFields(
        measurement = ac.prefix + measurement
    }

    if ac.pluginConfig != nil {
        if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
            return
        }
    }

    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    pt, err := client.NewPoint(measurement, tags, fields, timestamp)
    pt, err := client.NewPoint(measurement, tags, result, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
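
The uint64 branch above clamps values to the largest int64, since InfluxDB
cannot store uint64. A standalone Go sketch of that conversion (not part of the
diff; it uses math.MaxInt64 instead of the literal for clarity):

```go
package main

import (
	"fmt"
	"math"
)

// toInt64 converts a uint64 field value the way the accumulator does:
// values that fit in an int64 are converted directly, anything larger
// is clamped to math.MaxInt64 (9223372036854775807).
func toInt64(val uint64) int64 {
	if val < uint64(math.MaxInt64)+1 { // 9223372036854775808 == 2^63
		return int64(val)
	}
	return math.MaxInt64
}

func main() {
	fmt.Println(toInt64(42))             // 42
	fmt.Println(toInt64(math.MaxUint64)) // 9223372036854775807
}
```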

agent.go (6 changes)

@@ -104,7 +104,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {

    acc := NewAccumulator(plugin.Config, pointChan)
    acc.SetDebug(a.Config.Agent.Debug)
    acc.SetPrefix(plugin.Name + "_")
    // acc.SetPrefix(plugin.Name + "_")
    acc.SetDefaultTags(a.Config.Tags)

    if err := plugin.Plugin.Gather(acc); err != nil {

@@ -141,7 +141,7 @@ func (a *Agent) gatherSeparate(

    acc := NewAccumulator(plugin.Config, pointChan)
    acc.SetDebug(a.Config.Agent.Debug)
    acc.SetPrefix(plugin.Name + "_")
    // acc.SetPrefix(plugin.Name + "_")
    acc.SetDefaultTags(a.Config.Tags)

    if err := plugin.Plugin.Gather(acc); err != nil {

@@ -187,7 +187,7 @@ func (a *Agent) Test() error {
    for _, plugin := range a.Config.Plugins {
        acc := NewAccumulator(plugin.Config, pointChan)
        acc.SetDebug(true)
        acc.SetPrefix(plugin.Name + "_")
        // acc.SetPrefix(plugin.Name + "_")

        fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
        if plugin.Config.Interval != 0 {

@@ -97,8 +97,8 @@
# Mountpoints=["/"]

# Read metrics about disk IO by device
[[plugins.io]]
# By default, telegraf will gather stats for all devices including
[[plugins.diskio]]
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
# Devices=["sda","sdb"]

@@ -112,9 +112,13 @@ type Filter struct {

// PluginConfig containing a name, interval, and filter
type PluginConfig struct {
    Name     string
    Filter   Filter
    Interval time.Duration
    Name              string
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter
    Interval          time.Duration
}

// OutputConfig containing name and filter

@@ -142,12 +146,12 @@ func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {

// ShouldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldPass(measurement string) bool {
func (f Filter) ShouldPass(fieldkey string) bool {
    if f.Pass != nil {
        for _, pat := range f.Pass {
            // TODO remove HasPrefix check, leaving it for now for legacy support.
            // Cam, 2015-12-07
            if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
            if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
                return true
            }
        }

@@ -158,7 +162,7 @@ func (f Filter) ShouldPass(measurement string) bool {
        for _, pat := range f.Drop {
            // TODO remove HasPrefix check, leaving it for now for legacy support.
            // Cam, 2015-12-07
            if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
            if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
                return false
            }
        }

@@ -527,6 +531,11 @@ func (c *Config) addPlugin(name string, table *ast.Table) error {
    if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
        return nil
    }
    // Legacy support renaming io plugin to diskio
    if name == "io" {
        name = "diskio"
    }

    creator, ok := plugins.Plugins[name]
    if !ok {
        return fmt.Errorf("Undefined but requested plugin: %s", name)

@@ -628,7 +637,8 @@ func buildFilter(tbl *ast.Table) Filter {
    return f
}

// buildPlugin parses plugin specific items from the ast.Table, builds the filter and returns a
// buildPlugin parses plugin specific items from the ast.Table,
// builds the filter and returns a
// PluginConfig to be inserted into RunningPlugin
func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
    cp := &PluginConfig{Name: name}

@@ -644,10 +654,47 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
            }
        }
    }

    if node, ok := tbl.Fields["name_prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementPrefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_suffix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementSuffix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_override"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.NameOverride = str.Value
            }
        }
    }

    cp.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("Could not parse tags for plugin %s\n", name)
            }
        }
    }

    delete(tbl.Fields, "name_prefix")
    delete(tbl.Fields, "name_suffix")
    delete(tbl.Fields, "name_override")
    delete(tbl.Fields, "interval")
    delete(tbl.Fields, "tags")
    cp.Filter = buildFilter(tbl)
    return cp, nil

}
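
For reference, a hypothetical plugin table exercising the keys that
`buildPlugin` parses above (all values here are illustrative; the filter
parameters are handled separately by `buildFilter`):

```toml
[[plugins.cpu]]
    name_override = "cpu_custom"
    name_prefix = "dev_"
    name_suffix = "_v2"
    drop = ["time_*"]
    [plugins.cpu.tags]
        rack = "r42"
```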

// buildOutput parses output specific items from the ast.Table, builds the filter and returns an

@@ -659,5 +706,4 @@ func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
        Filter: buildFilter(tbl),
    }
    return oc, nil

}

@@ -105,7 +105,7 @@ urls = ["http://localhost/server-status?auto"]
drop = ["cpu_time"]

# Read metrics about disk usage by mount point
[[plugins.disk]]
[[plugins.diskio]]
# no configuration

# Read metrics from one or many disque servers
@ -3,6 +3,7 @@ package internal
|
|||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -27,6 +28,39 @@ func (d *Duration) UnmarshalTOML(b []byte) error {
|
|||
|
||||
var NotImplementedError = errors.New("not implemented yet")
|
||||
|
||||
type JSONFlattener struct {
|
||||
Fields map[string]interface{}
|
||||
}
|
||||
|
||||
// FlattenJSON flattens nested maps/interfaces into a fields map
|
||||
func (f *JSONFlattener) FlattenJSON(
|
||||
fieldname string,
|
||||
v interface{},
|
||||
) error {
|
||||
if f.Fields == nil {
|
||||
f.Fields = make(map[string]interface{})
|
||||
}
|
||||
fieldname = strings.Trim(fieldname, "_")
|
||||
switch t := v.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range t {
|
||||
err := f.FlattenJSON(fieldname+"_"+k+"_", v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case float64:
|
||||
f.Fields[fieldname] = t
|
||||
case nil, bool, string, []interface{}:
|
||||
// ignored types
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
|
||||
t, t, fieldname)
|
||||
}
|
||||
return nil
|
||||
}
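
A quick usage sketch of the flattener (standalone, not part of the diff):
nested JSON keys are joined with underscores and only numeric leaves are kept,
so string values such as "status" below are silently ignored.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/influxdb/telegraf/internal"
)

func main() {
	var parsed interface{}
	raw := []byte(`{"mem": {"used": 1024.0, "free": 2048.0}, "status": "ok"}`)
	if err := json.Unmarshal(raw, &parsed); err != nil {
		panic(err)
	}
	f := internal.JSONFlattener{}
	if err := f.FlattenJSON("", parsed); err != nil {
		panic(err)
	}
	// Prints the flattened fields, e.g. map[mem_free:2048 mem_used:1024]
	fmt.Println(f.Fields)
}
```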

// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {

@@ -58,21 +58,26 @@ func (a *Amon) Write(points []*client.Point) error {
        return nil
    }
    ts := TimeSeries{}
    var tempSeries = make([]*Metric, len(points))
    var acceptablePoints = 0
    tempSeries := []*Metric{}
    metricCounter := 0

    for _, pt := range points {
        metric := &Metric{
            Metric: strings.Replace(pt.Name(), "_", ".", -1),
        }
        if p, err := buildPoint(pt); err == nil {
            metric.Points[0] = p
            tempSeries[acceptablePoints] = metric
            acceptablePoints += 1
        mname := strings.Replace(pt.Name(), "_", ".", -1)
        if amonPts, err := buildPoints(pt); err == nil {
            for fieldName, amonPt := range amonPts {
                metric := &Metric{
                    Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
                }
                metric.Points[0] = amonPt
                tempSeries = append(tempSeries, metric)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
        }
    }
    ts.Series = make([]*Metric, acceptablePoints)

    ts.Series = make([]*Metric, metricCounter)
    copy(ts.Series, tempSeries[0:])
    tsBytes, err := json.Marshal(ts)
    if err != nil {

@@ -110,13 +115,17 @@ func (a *Amon) authenticatedUrl() string {
    return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
}

func buildPoint(pt *client.Point) (Point, error) {
    var p Point
    if err := p.setValue(pt.Fields()["value"]); err != nil {
        return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
func buildPoints(pt *client.Point) (map[string]Point, error) {
    pts := make(map[string]Point)
    for k, v := range pt.Fields() {
        var p Point
        if err := p.setValue(v); err != nil {
            return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
        }
        p[0] = float64(pt.Time().Unix())
        pts[k] = p
    }
    p[0] = float64(pt.Time().Unix())
    return p, nil
    return pts, nil
}

func (p *Point) setValue(v interface{}) error {

@@ -67,23 +67,26 @@ func (d *Datadog) Write(points []*client.Point) error {
        return nil
    }
    ts := TimeSeries{}
    var tempSeries = make([]*Metric, len(points))
    var acceptablePoints = 0
    tempSeries := []*Metric{}
    metricCounter := 0

    for _, pt := range points {
        metric := &Metric{
            Metric: strings.Replace(pt.Name(), "_", ".", -1),
            Tags:   buildTags(pt.Tags()),
            Host:   pt.Tags()["host"],
        }
        if p, err := buildPoint(pt); err == nil {
            metric.Points[0] = p
            tempSeries[acceptablePoints] = metric
            acceptablePoints += 1
        mname := strings.Replace(pt.Name(), "_", ".", -1)
        if amonPts, err := buildPoints(pt); err == nil {
            for fieldName, amonPt := range amonPts {
                metric := &Metric{
                    Metric: mname + strings.Replace(fieldName, "_", ".", -1),
                }
                metric.Points[0] = amonPt
                tempSeries = append(tempSeries, metric)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
        }
    }
    ts.Series = make([]*Metric, acceptablePoints)

    ts.Series = make([]*Metric, metricCounter)
    copy(ts.Series, tempSeries[0:])
    tsBytes, err := json.Marshal(ts)
    if err != nil {

@@ -123,13 +126,17 @@ func (d *Datadog) authenticatedUrl() string {
    return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
}

func buildPoint(pt *client.Point) (Point, error) {
    var p Point
    if err := p.setValue(pt.Fields()["value"]); err != nil {
        return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
func buildPoints(pt *client.Point) (map[string]Point, error) {
    pts := make(map[string]Point)
    for k, v := range pt.Fields() {
        var p Point
        if err := p.setValue(v); err != nil {
            return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
        }
        p[0] = float64(pt.Time().Unix())
        pts[k] = p
    }
    p[0] = float64(pt.Time().Unix())
    return p, nil
    return pts, nil
}

func buildTags(ptTags map[string]string) []string {

@@ -7,6 +7,7 @@ import (
    "math/rand"
    "net/url"
    "strings"
    "time"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"

@@ -110,6 +111,7 @@ func (i *InfluxDB) Connect() error {
    }

    i.conns = conns
    rand.Seed(time.Now().UnixNano())
    return nil
}

@@ -74,17 +74,21 @@ func (l *Librato) Write(points []*client.Point) error {
        return nil
    }
    metrics := Metrics{}
    var tempGauges = make([]*Gauge, len(points))
    var acceptablePoints = 0
    tempGauges := []*Gauge{}
    metricCounter := 0

    for _, pt := range points {
        if gauge, err := l.buildGauge(pt); err == nil {
            tempGauges[acceptablePoints] = gauge
            acceptablePoints += 1
        if gauges, err := l.buildGauges(pt); err == nil {
            for _, gauge := range gauges {
                tempGauges = append(tempGauges, gauge)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
        }
    }
    metrics.Gauges = make([]*Gauge, acceptablePoints)

    metrics.Gauges = make([]*Gauge, metricCounter)
    copy(metrics.Gauges, tempGauges[0:])
    metricsBytes, err := json.Marshal(metrics)
    if err != nil {

@@ -118,22 +122,28 @@ func (l *Librato) Description() string {
    return "Configuration for Librato API to send metrics to."
}

func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) {
    gauge := &Gauge{
        Name:        pt.Name(),
        MeasureTime: pt.Time().Unix(),
    }
    if err := gauge.setValue(pt.Fields()["value"]); err != nil {
        return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error())
    }
    if l.SourceTag != "" {
        if source, ok := pt.Tags()[l.SourceTag]; ok {
            gauge.Source = source
        } else {
            return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag)
func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) {
    gauges := []*Gauge{}
    for fieldName, value := range pt.Fields() {
        gauge := &Gauge{
            Name:        pt.Name() + "_" + fieldName,
            MeasureTime: pt.Time().Unix(),
        }
        if err := gauge.setValue(value); err != nil {
            return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
                err.Error())
        }
        if l.SourceTag != "" {
            if source, ok := pt.Tags()[l.SourceTag]; ok {
                gauge.Source = source
            } else {
                return gauges,
                    fmt.Errorf("undeterminable Source type from Field, %s\n",
                        l.SourceTag)
            }
        }
    }
    return gauge, nil
    return gauges, nil
}

func (g *Gauge) setValue(v interface{}) error {

@@ -62,7 +62,8 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }
    var timeNow = time.Now()
    now := time.Now()

    // Send Data with telnet / socket communication
    uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
    tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)

@@ -70,32 +71,21 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
    if err != nil {
        return fmt.Errorf("OpenTSDB: Telnet connect fail")
    }
    defer connection.Close()

    for _, pt := range points {
        metric := &MetricLine{
            Metric:    fmt.Sprintf("%s%s", o.Prefix, pt.Name()),
            Timestamp: timeNow.Unix(),
        }

        metricValue, buildError := buildValue(pt)
        if buildError != nil {
            fmt.Printf("OpenTSDB: %s\n", buildError.Error())
            continue
        }
        metric.Value = metricValue

        tagsSlice := buildTags(pt.Tags())
        metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))

        messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
        if o.Debug {
            fmt.Print(messageLine)
        }
        _, err := connection.Write([]byte(messageLine))
        if err != nil {
            return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
        for _, metric := range buildMetrics(pt, now, o.Prefix) {
            messageLine := fmt.Sprintf("put %s %v %s %s\n",
                metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
            if o.Debug {
                fmt.Print(messageLine)
            }
            _, err := connection.Write([]byte(messageLine))
            if err != nil {
                return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
            }
        }
    }
    defer connection.Close()

    return nil
}

@@ -111,9 +101,29 @@ func buildTags(ptTags map[string]string) []string {
    return tags
}

func buildValue(pt *client.Point) (string, error) {
func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine {
    ret := []*MetricLine{}
    for fieldName, value := range pt.Fields() {
        metric := &MetricLine{
            Metric:    fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName),
            Timestamp: now.Unix(),
        }

        metricValue, buildError := buildValue(value)
        if buildError != nil {
            fmt.Printf("OpenTSDB: %s\n", buildError.Error())
            continue
        }
        metric.Value = metricValue
        tagsSlice := buildTags(pt.Tags())
        metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
        ret = append(ret, metric)
    }
    return ret
}
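
Each MetricLine above is serialized as one line of the OpenTSDB telnet
protocol ("put <metric> <timestamp> <value> <tags>"). A hypothetical example
of the resulting `messageLine` for a `cpu` point with a `usage_idle` field and
a `host` tag (metric name, timestamp, and values are illustrative):

```
put cpu_usage_idle 1449100800 98.2 host=server01
```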

func buildValue(v interface{}) (string, error) {
    var retv string
    var v = pt.Fields()["value"]
    switch p := v.(type) {
    case int64:
        retv = IntToString(int64(p))

@@ -55,8 +55,10 @@ func (r *Riemann) Write(points []*client.Point) error {

    var events []*raidman.Event
    for _, p := range points {
        ev := buildEvent(p)
        events = append(events, ev)
        evs := buildEvents(p)
        for _, ev := range evs {
            events = append(events, ev)
        }
    }

    var senderr = r.client.SendMulti(events)

@@ -68,24 +70,28 @@ func (r *Riemann) Write(points []*client.Point) error {
    return nil
}

func buildEvent(p *client.Point) *raidman.Event {
    host, ok := p.Tags()["host"]
    if !ok {
        hostname, err := os.Hostname()
        if err != nil {
            host = "unknown"
        } else {
            host = hostname
func buildEvents(p *client.Point) []*raidman.Event {
    events := []*raidman.Event{}
    for fieldName, value := range p.Fields() {
        host, ok := p.Tags()["host"]
        if !ok {
            hostname, err := os.Hostname()
            if err != nil {
                host = "unknown"
            } else {
                host = hostname
            }
        }

        event := &raidman.Event{
            Host:    host,
            Service: p.Name() + "_" + fieldName,
            Metric:  value,
        }
        events = append(events, event)
    }

    var event = &raidman.Event{
        Host:    host,
        Service: p.Name(),
        Metric:  p.Fields()["value"],
    }

    return event
    return events
}
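
As a usage note (my reading of the change, not stated in the diff): a point
named `cpu` carrying fields `usage_idle` and `usage_user` now yields two
Riemann events, with Service names `cpu_usage_idle` and `cpu_usage_user`,
instead of a single event built from a lone "value" field.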

func init() {

@@ -247,26 +247,32 @@ func get(key []byte, host string) (map[string]string, error) {
    return data, err
}

func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) {
func readAerospikeStats(
    stats map[string]string,
    acc plugins.Accumulator,
    host string,
    namespace string,
) {
    fields := make(map[string]interface{})
    tags := map[string]string{
        "aerospike_host": host,
        "namespace":      "_service",
    }

    if namespace != "" {
        tags["namespace"] = namespace
    }
    for key, value := range stats {
        tags := map[string]string{
            "aerospike_host": host,
            "namespace":      "_service",
        }

        if namespace != "" {
            tags["namespace"] = namespace
        }

        // We are going to ignore all string based keys
        val, err := strconv.ParseInt(value, 10, 64)
        if err == nil {
            if strings.Contains(key, "-") {
                key = strings.Replace(key, "-", "_", -1)
            }
            acc.Add(key, val, tags)
            fields[key] = val
        }
    }
    acc.AddFields("aerospike", fields, tags)
}

func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {

@@ -72,32 +72,33 @@ func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
    tags := getTags(addr)

    sc := bufio.NewScanner(resp.Body)
    fields := make(map[string]interface{})
    for sc.Scan() {
        line := sc.Text()
        if strings.Contains(line, ":") {

            parts := strings.SplitN(line, ":", 2)
            key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])

            switch key {

            case "Scoreboard":
                n.gatherScores(part, acc, tags)
                for field, value := range n.gatherScores(part) {
                    fields[field] = value
                }
            default:
                value, err := strconv.ParseFloat(part, 64)
                if err != nil {
                    continue
                }
                acc.Add(key, value, tags)
                fields[key] = value
            }
        }
    }
    acc.AddFields("apache", fields, tags)

    return nil
}

func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) {

func (n *Apache) gatherScores(data string) map[string]interface{} {
    var waiting, open int = 0, 0
    var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0

@@ -129,17 +130,20 @@ func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[str
        }
    }

    acc.Add("scboard_waiting", float64(waiting), tags)
    acc.Add("scboard_starting", float64(S), tags)
    acc.Add("scboard_reading", float64(R), tags)
    acc.Add("scboard_sending", float64(W), tags)
    acc.Add("scboard_keepalive", float64(K), tags)
    acc.Add("scboard_dnslookup", float64(D), tags)
    acc.Add("scboard_closing", float64(C), tags)
    acc.Add("scboard_logging", float64(L), tags)
    acc.Add("scboard_finishing", float64(G), tags)
    acc.Add("scboard_idle_cleanup", float64(I), tags)
    acc.Add("scboard_open", float64(open), tags)
    fields := map[string]interface{}{
        "scboard_waiting":      float64(waiting),
        "scboard_starting":     float64(S),
        "scboard_reading":      float64(R),
        "scboard_sending":      float64(W),
        "scboard_keepalive":    float64(K),
        "scboard_dnslookup":    float64(D),
        "scboard_closing":      float64(C),
        "scboard_logging":      float64(L),
        "scboard_finishing":    float64(G),
        "scboard_idle_cleanup": float64(I),
        "scboard_open":         float64(open),
    }
    return fields
}

// Get tag(s) for the apache plugin

@@ -81,7 +81,9 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
    }
    rawValue := strings.TrimSpace(string(file))
    value := prettyToBytes(rawValue)
    acc.Add("dirty_data", value, tags)

    fields := make(map[string]interface{})
    fields["dirty_data"] = value

    for _, path := range metrics {
        key := filepath.Base(path)

@@ -92,12 +94,13 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
        }
        if key == "bypassed" {
            value := prettyToBytes(rawValue)
            acc.Add(key, value, tags)
            fields[key] = value
        } else {
            value, _ := strconv.ParseUint(rawValue, 10, 64)
            acc.Add(key, value, tags)
            fields[key] = value
        }
    }
    acc.AddFields("bcache", fields, tags)
    return nil
}

@@ -117,7 +120,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error {
    }
    bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
    if len(bdevs) < 1 {
        return errors.New("Can't found any bcache device")
        return errors.New("Can't find any bcache device")
    }
    for _, bdev := range bdevs {
        if restrictDevs {

@@ -155,6 +155,8 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {

    var read int

    fields := make(map[string]interface{})
    tags := map[string]string{"host": addr.String()}
    for read < sz {
        line, err := r.ReadString('\n')
        if err != nil {

@@ -176,12 +178,11 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
            continue
        }

        tags := map[string]string{"host": addr.String()}
        val := strings.TrimSpace(parts[1])

        ival, err := strconv.ParseUint(val, 10, 64)
        if err == nil {
            acc.Add(metric, ival, tags)
            fields[metric] = ival
            continue
        }

@@ -190,9 +191,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
            return err
        }

        acc.Add(metric, fval, tags)
        fields[metric] = fval
    }

    acc.AddFields("disque", fields, tags)
    return nil
}

@@ -31,8 +31,10 @@ contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
`initializing_shards`, `unassigned_shards` fields
- elasticsearch_cluster_health

contains `status`, `number_of_shards`, `number_of_replicas`, `active_primary_shards`,
`active_shards`, `relocating_shards`, `initializing_shards`, `unassigned_shards` fields
contains `status`, `number_of_shards`, `number_of_replicas`,
`active_primary_shards`, `active_shards`, `relocating_shards`,
`initializing_shards`, `unassigned_shards` fields
- elasticsearch_indices

#### node measurements:

@@ -316,4 +317,4 @@ Transport statistics about sent and received bytes in cluster communication meas
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380
- elasticsearch_transport_tx_size_in_bytes value=1380

@@ -6,6 +6,7 @@ import (
    "net/http"
    "time"

    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/plugins"
)

@@ -141,10 +142,14 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err
        "breakers":    n.Breakers,
    }

    now := time.Now()
    for p, s := range stats {
        if err := e.parseInterface(acc, p, tags, s); err != nil {
        f := internal.JSONFlattener{}
        err := f.FlattenJSON("", s)
        if err != nil {
            return err
        }
        acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
    }
}
return nil

@@ -168,7 +173,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator)
        "unassigned_shards":    clusterStats.UnassignedShards,
    }
    acc.AddFields(
        "cluster_health",
        "elasticsearch_cluster_health",
        clusterFields,
        map[string]string{"name": clusterStats.ClusterName},
        measurementTime,

@@ -186,7 +191,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator)
        "unassigned_shards":    health.UnassignedShards,
    }
    acc.AddFields(
        "indices",
        "elasticsearch_indices",
        indexFields,
        map[string]string{"index": name},
        measurementTime,

@@ -205,7 +210,8 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
        // NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
        // to let the underlying transport close the connection and re-establish a new one for
        // future calls.
        return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
        return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
            r.StatusCode, http.StatusOK)
    }
    if err = json.NewDecoder(r.Body).Decode(v); err != nil {
        return err

@@ -213,25 +219,6 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
    return nil
}

func (e *Elasticsearch) parseInterface(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) error {
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            if err := e.parseInterface(acc, prefix+"_"+k, tags, v); err != nil {
                return err
            }
        }
    case float64:
        acc.Add(prefix, t, tags)
    case bool, string, []interface{}:
        // ignored types
        return nil
    default:
        return fmt.Errorf("elasticsearch: got unexpected type %T with value %v (%s)", t, t, prefix)
    }
    return nil
}

func init() {
    plugins.Add("elasticsearch", func() plugins.Plugin {
        return NewElasticsearch()

@@ -3,59 +3,38 @@ package exec
import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/gonuts/go-shellquote"
    "github.com/influxdb/telegraf/plugins"
    "math"
    "os/exec"
    "strings"
    "sync"
    "time"

    "github.com/gonuts/go-shellquote"

    "github.com/influxdb/telegraf/internal"
    "github.com/influxdb/telegraf/plugins"
)

const sampleConfig = `
# specify commands via an array of tables
[[plugins.exec.commands]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"

# name of the command (used as a prefix for measurements)
name = "mycollector"

# Only run this command if it has been at least this many
# seconds since it last ran
interval = 10
`

type Exec struct {
    Commands []*Command
    runner   Runner
    clock    Clock
}
    Command string
    Name    string

type Command struct {
    Command   string
    Name      string
    Interval  int
    lastRunAt time.Time
    runner Runner
}

type Runner interface {
    Run(*Command) ([]byte, error)
}

type Clock interface {
    Now() time.Time
    Run(*Exec) ([]byte, error)
}

type CommandRunner struct{}

type RealClock struct{}

func (c CommandRunner) Run(command *Command) ([]byte, error) {
    command.lastRunAt = time.Now()
    split_cmd, err := shellquote.Split(command.Command)
func (c CommandRunner) Run(e *Exec) ([]byte, error) {
    split_cmd, err := shellquote.Split(e.Command)
    if err != nil || len(split_cmd) == 0 {
        return nil, fmt.Errorf("exec: unable to parse command, %s", err)
    }

@@ -65,18 +44,14 @@ func (c CommandRunner) Run(command *Command) ([]byte, error) {
    cmd.Stdout = &out

    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("exec: %s for command '%s'", err, command.Command)
        return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command)
    }

    return out.Bytes(), nil
}

func (c RealClock) Now() time.Time {
    return time.Now()
}

func NewExec() *Exec {
    return &Exec{runner: CommandRunner{}, clock: RealClock{}}
    return &Exec{runner: CommandRunner{}}
}

func (e *Exec) SampleConfig() string {

@@ -88,73 +63,34 @@ func (e *Exec) Description() string {
}

func (e *Exec) Gather(acc plugins.Accumulator) error {
    var wg sync.WaitGroup

    errorChannel := make(chan error, len(e.Commands))

    for _, c := range e.Commands {
        wg.Add(1)
        go func(c *Command, acc plugins.Accumulator) {
            defer wg.Done()
            err := e.gatherCommand(c, acc)
            if err != nil {
                errorChannel <- err
            }
        }(c, acc)
    out, err := e.runner.Run(e)
    if err != nil {
        return err
    }

    wg.Wait()
    close(errorChannel)

    // Get all errors and return them as one giant error
    errorStrings := []string{}
    for err := range errorChannel {
        errorStrings = append(errorStrings, err.Error())
    var jsonOut interface{}
    err = json.Unmarshal(out, &jsonOut)
    if err != nil {
        return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
            e.Command, err)
    }

    if len(errorStrings) == 0 {
        return nil
    f := internal.JSONFlattener{}
    err = f.FlattenJSON("", jsonOut)
    if err != nil {
        return err
    }
    return errors.New(strings.Join(errorStrings, "\n"))
}

func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error {
    secondsSinceLastRun := 0.0

    if c.lastRunAt.Unix() == 0 { // means time is uninitialized
        secondsSinceLastRun = math.Inf(1)
    var msrmnt_name string
    if e.Name == "" {
        msrmnt_name = "exec"
    } else {
        secondsSinceLastRun = (e.clock.Now().Sub(c.lastRunAt)).Seconds()
    }

    if secondsSinceLastRun >= float64(c.Interval) {
        out, err := e.runner.Run(c)
        if err != nil {
            return err
        }

        var jsonOut interface{}
        err = json.Unmarshal(out, &jsonOut)
        if err != nil {
            return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", c.Command, err)
        }

        processResponse(acc, c.Name, map[string]string{}, jsonOut)
        msrmnt_name = "exec_" + e.Name
    }
    acc.AddFields(msrmnt_name, f.Fields, nil)
    return nil
}

func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            processResponse(acc, prefix+"_"+k, tags, v)
        }
    case float64:
        acc.Add(prefix, v, tags)
    }
}

func init() {
    plugins.Add("exec", func() plugins.Plugin {
        return NewExec()

@@ -9,6 +9,7 @@ import (
    "net/url"
    "strconv"
    "sync"
    "time"
)

//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1

@@ -152,210 +153,208 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
        return fmt.Errorf("Unable to get valid stat result from '%s': %s", addr, err)
    }

    importCsvResult(res.Body, acc, u.Host)

    return nil
    return importCsvResult(res.Body, acc, u.Host)
}

func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]string, error) {
func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
    csv := csv.NewReader(r)
    result, err := csv.ReadAll()
    now := time.Now()

    for _, row := range result {

        fields := make(map[string]interface{})
        tags := map[string]string{
            "server": host,
            "proxy":  row[HF_PXNAME],
            "sv":     row[HF_SVNAME],
        }
        for field, v := range row {
            tags := map[string]string{
                "server": host,
                "proxy":  row[HF_PXNAME],
                "sv":     row[HF_SVNAME],
            }
            switch field {
            case HF_QCUR:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("qcur", ival, tags)
                    fields["qcur"] = ival
                }
            case HF_QMAX:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("qmax", ival, tags)
                    fields["qmax"] = ival
                }
            case HF_SCUR:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("scur", ival, tags)
                    fields["scur"] = ival
                }
            case HF_SMAX:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("smax", ival, tags)
                    fields["smax"] = ival
                }
            case HF_STOT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("stot", ival, tags)
                    fields["stot"] = ival
                }
            case HF_BIN:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("bin", ival, tags)
                    fields["bin"] = ival
                }
            case HF_BOUT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("bout", ival, tags)
                    fields["bout"] = ival
                }
            case HF_DREQ:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("dreq", ival, tags)
                    fields["dreq"] = ival
                }
            case HF_DRESP:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("dresp", ival, tags)
                    fields["dresp"] = ival
                }
            case HF_EREQ:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("ereq", ival, tags)
                    fields["ereq"] = ival
                }
            case HF_ECON:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("econ", ival, tags)
                    fields["econ"] = ival
                }
            case HF_ERESP:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("eresp", ival, tags)
                    fields["eresp"] = ival
                }
            case HF_WRETR:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("wretr", ival, tags)
                    fields["wretr"] = ival
                }
            case HF_WREDIS:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("wredis", ival, tags)
                    fields["wredis"] = ival
                }
            case HF_ACT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("active_servers", ival, tags)
                    fields["active_servers"] = ival
                }
            case HF_BCK:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("backup_servers", ival, tags)
                    fields["backup_servers"] = ival
                }
            case HF_DOWNTIME:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("downtime", ival, tags)
                    fields["downtime"] = ival
                }
            case HF_THROTTLE:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("throttle", ival, tags)
                    fields["throttle"] = ival
                }
            case HF_LBTOT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("lbtot", ival, tags)
                    fields["lbtot"] = ival
                }
            case HF_RATE:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("rate", ival, tags)
                    fields["rate"] = ival
                }
            case HF_RATE_MAX:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("rate_max", ival, tags)
                    fields["rate_max"] = ival
                }
            case HF_CHECK_DURATION:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("check_duration", ival, tags)
                    fields["check_duration"] = ival
                }
            case HF_HRSP_1xx:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("http_response.1xx", ival, tags)
                    fields["http_response.1xx"] = ival
                }
            case HF_HRSP_2xx:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("http_response.2xx", ival, tags)
                    fields["http_response.2xx"] = ival
                }
            case HF_HRSP_3xx:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("http_response.3xx", ival, tags)
                    fields["http_response.3xx"] = ival
                }
            case HF_HRSP_4xx:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("http_response.4xx", ival, tags)
                    fields["http_response.4xx"] = ival
                }
            case HF_HRSP_5xx:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("http_response.5xx", ival, tags)
                    fields["http_response.5xx"] = ival
                }
            case HF_REQ_RATE:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("req_rate", ival, tags)
                    fields["req_rate"] = ival
                }
            case HF_REQ_RATE_MAX:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("req_rate_max", ival, tags)
                    fields["req_rate_max"] = ival
                }
            case HF_REQ_TOT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("req_tot", ival, tags)
                    fields["req_tot"] = ival
                }
            case HF_CLI_ABRT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("cli_abort", ival, tags)
                    fields["cli_abort"] = ival
                }
            case HF_SRV_ABRT:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("srv_abort", ival, tags)
                    fields["srv_abort"] = ival
                }
            case HF_QTIME:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("qtime", ival, tags)
                    fields["qtime"] = ival
                }
            case HF_CTIME:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("ctime", ival, tags)
                    fields["ctime"] = ival
                }
            case HF_RTIME:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("rtime", ival, tags)
                    fields["rtime"] = ival
                }
            case HF_TTIME:
                ival, err := strconv.ParseUint(v, 10, 64)
                if err == nil {
                    acc.Add("ttime", ival, tags)
                    fields["ttime"] = ival
                }

            }

        }
        acc.AddFields("haproxy", fields, tags, now)
    }
    return result, err
    return err
}
|
||||
|
||||
func init() {
|
||||
|
|
|
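The haproxy hunk above repeats one pattern: instead of calling `acc.Add` once per CSV column, each parsed value is collected into a shared `fields` map and the whole map is emitted once as a single `haproxy` point. A minimal, self-contained sketch of that pattern follows; the `Accumulator` interface, `printAcc`, and the sample row are simplified stand-ins for illustration, not the real telegraf types.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Trimmed-down stand-in for telegraf's plugins.Accumulator; this
// sketch only needs AddFields.
type Accumulator interface {
	AddFields(measurement string, fields map[string]interface{},
		tags map[string]string, t ...time.Time)
}

type printAcc struct{}

func (printAcc) AddFields(m string, f map[string]interface{},
	t map[string]string, ts ...time.Time) {
	fmt.Println(m, f, t)
}

func main() {
	// One hypothetical haproxy stats row: column name -> raw value.
	row := map[string]string{"rate": "12", "req_tot": "3400", "qtime": "bad"}

	tags := map[string]string{"proxy": "web", "sv": "backend1"}
	fields := make(map[string]interface{})
	for name, raw := range row {
		// As in the diff: parse each value and fold it into one
		// fields map instead of emitting a point per column.
		if ival, err := strconv.ParseUint(raw, 10, 64); err == nil {
			fields[name] = ival
		}
	}
	var acc Accumulator = printAcc{}
	acc.AddFields("haproxy", fields, tags, time.Now())
}
```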
@@ -9,21 +9,19 @@ import (
"net/url"
"strings"
"sync"
"time"

"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
)

type HttpJson struct {
Services []Service
client HTTPClient
}

type Service struct {
Name string
Servers []string
Method string
TagKeys []string
Parameters map[string]string
client HTTPClient
}

type HTTPClient interface {
@@ -47,31 +45,28 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
}

var sampleConfig = `
# Specify services via an array of tables
[[plugins.httpjson.services]]
# a name for the service being polled
name = "webserver_stats"

# a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]

# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"

# HTTP method to use (case-sensitive)
method = "GET"
# List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]

# List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]

# HTTP parameters (all values must be strings)
[plugins.httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# HTTP parameters (all values must be strings)
[plugins.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
`

func (h *HttpJson) SampleConfig() string {
@@ -86,22 +81,16 @@ func (h *HttpJson) Description() string {
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup

totalServers := 0
for _, service := range h.Services {
totalServers += len(service.Servers)
}
errorChannel := make(chan error, totalServers)
errorChannel := make(chan error, len(h.Servers))

for _, service := range h.Services {
for _, server := range service.Servers {
wg.Add(1)
go func(service Service, server string) {
defer wg.Done()
if err := h.gatherServer(acc, service, server); err != nil {
errorChannel <- err
}
}(service, server)
}
for _, server := range h.Servers {
wg.Add(1)
go func(server string) {
defer wg.Done()
if err := h.gatherServer(acc, server); err != nil {
errorChannel <- err
}
}(server)
}

wg.Wait()
@@ -129,10 +118,10 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error {
// error: Any error that may have occurred
func (h *HttpJson) gatherServer(
acc plugins.Accumulator,
service Service,
serverURL string,
) error {
resp, err := h.sendRequest(service, serverURL)
resp, responseTime, err := h.sendRequest(serverURL)

if err != nil {
return err
}
@@ -146,7 +135,7 @@ func (h *HttpJson) gatherServer(
"server": serverURL,
}

for _, tag := range service.TagKeys {
for _, tag := range h.TagKeys {
switch v := jsonOut[tag].(type) {
case string:
tags[tag] = v
@@ -154,7 +143,22 @@ func (h *HttpJson) gatherServer(
delete(jsonOut, tag)
}

processResponse(acc, service.Name, tags, jsonOut)
if responseTime >= 0 {
jsonOut["response_time"] = responseTime
}
f := internal.JSONFlattener{}
err = f.FlattenJSON("", jsonOut)
if err != nil {
return err
}

var msrmnt_name string
if h.Name == "" {
msrmnt_name = "httpjson"
} else {
msrmnt_name = "httpjson_" + h.Name
}
acc.AddFields(msrmnt_name, f.Fields, tags)
return nil
}

@@ -165,34 +169,37 @@ func (h *HttpJson) gatherServer(
// Returns:
// string: body of the response
// error : Any error that may have occurred
func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error) {
func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
// Prepare URL
requestURL, err := url.Parse(serverURL)
if err != nil {
return "", fmt.Errorf("Invalid server URL \"%s\"", serverURL)
return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
}

params := url.Values{}
for k, v := range service.Parameters {
for k, v := range h.Parameters {
params.Add(k, v)
}
requestURL.RawQuery = params.Encode()

// Create + send request
req, err := http.NewRequest(service.Method, requestURL.String(), nil)
req, err := http.NewRequest(h.Method, requestURL.String(), nil)
if err != nil {
return "", err
return "", -1, err
}

start := time.Now()
resp, err := h.client.MakeRequest(req)
if err != nil {
return "", err
return "", -1, err
}

defer resp.Body.Close()
responseTime := time.Since(start).Seconds()

body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return string(body), err
return string(body), responseTime, err
}

// Process response
@@ -203,27 +210,10 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error
http.StatusText(resp.StatusCode),
http.StatusOK,
http.StatusText(http.StatusOK))
return string(body), err
return string(body), responseTime, err
}

return string(body), err
}

// Flattens the map generated from the JSON object and stores its float values using a
// plugins.Accumulator. It ignores any non-float values.
// Parameters:
// acc: the Accumulator to use
// prefix: What the measurement name should be prefixed by.
// tags: telegraf tags to
func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
processResponse(acc, prefix+"_"+k, tags, v)
}
case float64:
acc.Add(prefix, v, tags)
}
return string(body), responseTime, err
}

func init() {
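The reworked `sendRequest` threads a response time through its return values, using `-1` as a sentinel when the request never completed. Below is a small sketch of that shape using `net/http` directly; `fetch` is a hypothetical helper written for this example, not telegraf's API, and the URL is the one from the sample config.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

// fetch mirrors the reworked sendRequest: it returns the body, the
// request duration in seconds, and -1 for the duration when the
// request never completed.
func fetch(url string) (string, float64, error) {
	start := time.Now()
	resp, err := http.Get(url)
	if err != nil {
		return "", -1, err
	}
	defer resp.Body.Close()
	responseTime := time.Since(start).Seconds()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), responseTime, err
}

func main() {
	body, rt, err := fetch("http://localhost:9999/stats/")
	fmt.Println(len(body), rt, err)
}
```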
@@ -15,7 +15,7 @@ import (
const validJSON = `
{
"parent": {
"child": 3,
"child": 3.0,
"ignored_child": "hi"
},
"ignored_null": null,
@@ -76,65 +76,64 @@ func (c mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
//
// Returns:
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
func genMockHttpJson(response string, statusCode int) *HttpJson {
return &HttpJson{
func genMockHttpJsons(response string, statusCode int) []*HttpJson {
httpjson1 := &HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
Services: []Service{
Service{
Servers: []string{
"http://server1.example.com/metrics/",
"http://server2.example.com/metrics/",
},
Name: "my_webapp",
Method: "GET",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
},
Service{
Servers: []string{
"http://server3.example.com/metrics/",
"http://server4.example.com/metrics/",
},
Name: "other_webapp",
Method: "POST",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
TagKeys: []string{
"role",
"build",
},
},
Servers: []string{
"http://server1.example.com/metrics/",
"http://server2.example.com/metrics/",
},
Name: "my_webapp",
Method: "GET",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
}
httpjson2 := &HttpJson{
client: mockHTTPClient{responseBody: response, statusCode: statusCode},
Servers: []string{
"http://server3.example.com/metrics/",
"http://server4.example.com/metrics/",
},
Name: "other_webapp",
Method: "POST",
Parameters: map[string]string{
"httpParam1": "12",
"httpParam2": "the second parameter",
},
TagKeys: []string{
"role",
"build",
},
}
httpjsons := []*HttpJson{httpjson1, httpjson2}
return httpjsons
}

// Test that the proper values are ignored or collected
func TestHttpJson200(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 200)
httpjsons := genMockHttpJsons(validJSON, 200)
for _, httpjson := range httpjsons {
var acc testutil.Accumulator
err := httpjson.Gather(&acc)
require.NoError(t, err)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)
require.NoError(t, err)
assert.Equal(t, 2, len(acc.Points))

assert.Equal(t, 8, len(acc.Points))

for _, service := range httpjson.Services {
for _, srv := range service.Servers {
for _, srv := range httpjson.Servers {
// Override response time
for _, p := range acc.Points {
p.Fields["response_time"] = 1.0
}
require.NoError(t,
acc.ValidateTaggedValue(
fmt.Sprintf("%s_parent_child", service.Name),
3.0,
map[string]string{"server": srv},
),
)
require.NoError(t,
acc.ValidateTaggedValue(
fmt.Sprintf("%s_integer", service.Name),
4.0,
acc.ValidateTaggedFieldsValue(
fmt.Sprintf("httpjson_%s", httpjson.Name),
map[string]interface{}{
"parent_child": 3.0,
"integer": 4.0,
"response_time": 1.0,
},
map[string]string{"server": srv},
),
)
@@ -144,80 +143,95 @@ func TestHttpJson200(t *testing.T) {

// Test response to HTTP 500
func TestHttpJson500(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 500)
httpjsons := genMockHttpJsons(validJSON, 500)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)
for _, httpjson := range httpjsons {
err := httpjson.Gather(&acc)

assert.NotNil(t, err)
// 4 error lines for (2 urls) * (2 services)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
assert.NotNil(t, err)
// 2 error lines for (2 urls)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
}
assert.Equal(t, 0, len(acc.Points))
}

// Test response to HTTP 405
func TestHttpJsonBadMethod(t *testing.T) {
httpjson := genMockHttpJson(validJSON, 200)
httpjson.Services[0].Method = "NOT_A_REAL_METHOD"
httpjsons := genMockHttpJsons(validJSON, 200)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)

assert.NotNil(t, err)
// 2 error lines for (2 urls) * (1 failed service)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
for _, httpjson := range httpjsons {
httpjson.Method = "NOT_A_REAL_METHOD"
err := httpjson.Gather(&acc)

assert.NotNil(t, err)
// 2 error lines for (2 urls) * (1 failed service)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
}
// (2 measurements) * (2 servers) * (1 successful service)
assert.Equal(t, 4, len(acc.Points))
assert.Equal(t, 0, len(acc.Points))
}

// Test response to malformed JSON
func TestHttpJsonBadJson(t *testing.T) {
httpjson := genMockHttpJson(invalidJSON, 200)
httpjsons := genMockHttpJsons(invalidJSON, 200)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)
for _, httpjson := range httpjsons {
err := httpjson.Gather(&acc)

assert.NotNil(t, err)
// 4 error lines for (2 urls) * (2 services)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
assert.NotNil(t, err)
// 2 error lines for (2 urls)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
}
assert.Equal(t, 0, len(acc.Points))
}

// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
httpjson := genMockHttpJson(empty, 200)
httpjsons := genMockHttpJsons(empty, 200)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)
for _, httpjson := range httpjsons {
err := httpjson.Gather(&acc)

assert.NotNil(t, err)
// 4 error lines for (2 urls) * (2 services)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
assert.NotNil(t, err)
// 2 error lines for (2 urls)
assert.Equal(t, len(strings.Split(err.Error(), "\n")), 2)
}
assert.Equal(t, 0, len(acc.Points))
}

// Test that the proper values are ignored or collected
func TestHttpJson200Tags(t *testing.T) {
httpjson := genMockHttpJson(validJSONTags, 200)
httpjsons := genMockHttpJsons(validJSONTags, 200)

var acc testutil.Accumulator
err := httpjson.Gather(&acc)
require.NoError(t, err)
for _, httpjson := range httpjsons {
err := httpjson.Gather(&acc)
require.NoError(t, err)

assert.Equal(t, 4, len(acc.Points))

for _, service := range httpjson.Services {
if service.Name == "other_webapp" {
for _, srv := range service.Servers {
if httpjson.Name == "other_webapp" {
assert.Equal(t, 4, len(acc.Points))
for _, srv := range httpjson.Servers {
// Override response time
for _, p := range acc.Points {
p.Fields["response_time"] = 1.0
}
require.NoError(t,
acc.ValidateTaggedValue(
fmt.Sprintf("%s_value", service.Name),
15.0,
acc.ValidateTaggedFieldsValue(
fmt.Sprintf("httpjson_%s", httpjson.Name),
map[string]interface{}{
"value": 15.0,
"response_time": 1.0,
},
map[string]string{"server": srv, "role": "master", "build": "123"},
),
)
}
} else {
assert.Equal(t, 2, len(acc.Points))
}
}
}
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"

"github.com/influxdb/telegraf/plugins"
)
@@ -23,8 +22,6 @@ type Server struct {
type Metric struct {
Name string
Jmx string
Pass []string
Drop []string
}

type JolokiaClient interface {
@@ -44,7 +41,6 @@ type Jolokia struct {
Context string
Servers []Server
Metrics []Metric
Tags map[string]string
}

func (j *Jolokia) SampleConfig() string {
@@ -52,10 +48,6 @@ func (j *Jolokia) SampleConfig() string {
# This is the context root used to compose the jolokia url
context = "/jolokia/read"

# Tags added to each measurement
[jolokia.tags]
group = "as"

# List of servers exposing jolokia read service
[[plugins.jolokia.servers]]
name = "stable"
@@ -70,23 +62,6 @@ func (j *Jolokia) SampleConfig() string {
[[plugins.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"


# This drops the 'committed' value from Eden space measurement
[[plugins.jolokia.metrics]]
name = "memory_eden"
jmx = "/java.lang:type=MemoryPool,name=PS Eden Space/Usage"
drop = [ "committed" ]


# This passes only DaemonThreadCount and ThreadCount
[[plugins.jolokia.metrics]]
name = "heap_threads"
jmx = "/java.lang:type=Threading"
pass = [
"DaemonThreadCount",
"ThreadCount"
]
`
}

@@ -100,12 +75,9 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
if err != nil {
return nil, err
}
defer req.Body.Close()

resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err
}

if err != nil {
return nil, err
}
@@ -137,65 +109,22 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
return jsonOut, nil
}

func (m *Metric) shouldPass(field string) bool {

if m.Pass != nil {

for _, pass := range m.Pass {
if strings.HasPrefix(field, pass) {
return true
}
}

return false
}

if m.Drop != nil {

for _, drop := range m.Drop {
if strings.HasPrefix(field, drop) {
return false
}
}

return true
}

return true
}

func (m *Metric) filterFields(fields map[string]interface{}) map[string]interface{} {

for field, _ := range fields {
if !m.shouldPass(field) {
delete(fields, field)
}
}

return fields
}

func (j *Jolokia) Gather(acc plugins.Accumulator) error {

context := j.Context //"/jolokia/read"
servers := j.Servers
metrics := j.Metrics
tags := j.Tags

if tags == nil {
tags = map[string]string{}
}
tags := make(map[string]string)

for _, server := range servers {
tags["server"] = server.Name
tags["port"] = server.Port
tags["host"] = server.Host
fields := make(map[string]interface{})
for _, metric := range metrics {

measurement := metric.Name
jmxPath := metric.Jmx

tags["server"] = server.Name
tags["port"] = server.Port
tags["host"] = server.Host

// Prepare URL
requestUrl, err := url.Parse("http://" + server.Host + ":" +
server.Port + context + jmxPath)
@@ -209,16 +138,20 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error {
out, _ := j.getAttr(requestUrl)

if values, ok := out["value"]; ok {
switch values.(type) {
switch t := values.(type) {
case map[string]interface{}:
acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags)
for k, v := range t {
fields[measurement+"_"+k] = v
}
case interface{}:
acc.Add(measurement, values.(interface{}), tags)
fields[measurement] = t
}
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n", requestUrl.String())
fmt.Printf("Missing key 'value' in '%s' output response\n",
requestUrl.String())
}
}
acc.AddFields("jolokia", fields, tags)
}

return nil
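The new jolokia code folds each JMX read into one fields map: a nested map under "value" becomes `<metric>_<key>` entries, while a scalar keeps the metric name as-is. A minimal sketch of that flattening follows; it uses a `default` case where the diff uses `case interface{}` (equivalent here for non-nil values), and the input literals are made-up example data.

```go
package main

import "fmt"

// flatten folds a jolokia "value" payload into a shared fields map,
// mirroring the switch in the reworked Gather.
func flatten(measurement string, value interface{}, fields map[string]interface{}) {
	switch t := value.(type) {
	case map[string]interface{}:
		for k, v := range t {
			fields[measurement+"_"+k] = v
		}
	default:
		fields[measurement] = t
	}
}

func main() {
	fields := make(map[string]interface{})
	flatten("heap_memory_usage", map[string]interface{}{
		"used": 1024.0, "max": 4096.0,
	}, fields)
	flatten("thread_count", 42.0, fields)
	fmt.Println(fields)
	// map[heap_memory_usage_max:4096 heap_memory_usage_used:1024 thread_count:42]
}
```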
@@ -197,6 +197,8 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins
"node": nodeNameTrimmed,
}
i := 0

fields := make(map[string]interface{})
for scanner.Scan() {
key := KeyMapping[serverType][i]
val, err := retrieveTokenAfterColon(scanner.Text())
@@ -207,9 +209,10 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins
if err != nil {
return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
}
acc.Add(key, fVal, tags)
fields[key] = fVal
i++
}
acc.AddFields("leofs", fields, tags)
return nil
}
@@ -149,19 +149,19 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
return err
}

fields := make(map[string]interface{})
for _, line := range lines {
fields := strings.Fields(line)

parts := strings.Fields(line)
for _, wanted := range wanted_fields {
var data uint64
if fields[0] == wanted.inProc {
if parts[0] == wanted.inProc {
wanted_field := wanted.field
// if not set, assume field[1]. Shouldn't be field[0], as
// that's a string
if wanted_field == 0 {
wanted_field = 1
}
data, err = strconv.ParseUint((fields[wanted_field]), 10, 64)
data, err = strconv.ParseUint((parts[wanted_field]), 10, 64)
if err != nil {
return err
}
@@ -169,11 +169,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
if wanted.reportAs != "" {
report_name = wanted.reportAs
}
acc.Add(report_name, data, tags)

fields[report_name] = data
}
}
}
acc.AddFields("lustre2", fields, tags)
}
return nil
}
@@ -75,35 +75,38 @@ func gatherReport(acc plugins.Accumulator, report Report, now time.Time) {
tags := make(map[string]string)
tags["id"] = report.ID
tags["campaign_title"] = report.CampaignTitle
acc.Add("emails_sent", report.EmailsSent, tags, now)
acc.Add("abuse_reports", report.AbuseReports, tags, now)
acc.Add("unsubscribed", report.Unsubscribed, tags, now)
acc.Add("hard_bounces", report.Bounces.HardBounces, tags, now)
acc.Add("soft_bounces", report.Bounces.SoftBounces, tags, now)
acc.Add("syntax_errors", report.Bounces.SyntaxErrors, tags, now)
acc.Add("forwards_count", report.Forwards.ForwardsCount, tags, now)
acc.Add("forwards_opens", report.Forwards.ForwardsOpens, tags, now)
acc.Add("opens_total", report.Opens.OpensTotal, tags, now)
acc.Add("unique_opens", report.Opens.UniqueOpens, tags, now)
acc.Add("open_rate", report.Opens.OpenRate, tags, now)
acc.Add("clicks_total", report.Clicks.ClicksTotal, tags, now)
acc.Add("unique_clicks", report.Clicks.UniqueClicks, tags, now)
acc.Add("unique_subscriber_clicks", report.Clicks.UniqueSubscriberClicks, tags, now)
acc.Add("click_rate", report.Clicks.ClickRate, tags, now)
acc.Add("facebook_recipient_likes", report.FacebookLikes.RecipientLikes, tags, now)
acc.Add("facebook_unique_likes", report.FacebookLikes.UniqueLikes, tags, now)
acc.Add("facebook_likes", report.FacebookLikes.FacebookLikes, tags, now)
acc.Add("industry_type", report.IndustryStats.Type, tags, now)
acc.Add("industry_open_rate", report.IndustryStats.OpenRate, tags, now)
acc.Add("industry_click_rate", report.IndustryStats.ClickRate, tags, now)
acc.Add("industry_bounce_rate", report.IndustryStats.BounceRate, tags, now)
acc.Add("industry_unopen_rate", report.IndustryStats.UnopenRate, tags, now)
acc.Add("industry_unsub_rate", report.IndustryStats.UnsubRate, tags, now)
acc.Add("industry_abuse_rate", report.IndustryStats.AbuseRate, tags, now)
acc.Add("list_stats_sub_rate", report.ListStats.SubRate, tags, now)
acc.Add("list_stats_unsub_rate", report.ListStats.UnsubRate, tags, now)
acc.Add("list_stats_open_rate", report.ListStats.OpenRate, tags, now)
acc.Add("list_stats_click_rate", report.ListStats.ClickRate, tags, now)
fields := map[string]interface{}{
"emails_sent": report.EmailsSent,
"abuse_reports": report.AbuseReports,
"unsubscribed": report.Unsubscribed,
"hard_bounces": report.Bounces.HardBounces,
"soft_bounces": report.Bounces.SoftBounces,
"syntax_errors": report.Bounces.SyntaxErrors,
"forwards_count": report.Forwards.ForwardsCount,
"forwards_opens": report.Forwards.ForwardsOpens,
"opens_total": report.Opens.OpensTotal,
"unique_opens": report.Opens.UniqueOpens,
"open_rate": report.Opens.OpenRate,
"clicks_total": report.Clicks.ClicksTotal,
"unique_clicks": report.Clicks.UniqueClicks,
"unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks,
"click_rate": report.Clicks.ClickRate,
"facebook_recipient_likes": report.FacebookLikes.RecipientLikes,
"facebook_unique_likes": report.FacebookLikes.UniqueLikes,
"facebook_likes": report.FacebookLikes.FacebookLikes,
"industry_type": report.IndustryStats.Type,
"industry_open_rate": report.IndustryStats.OpenRate,
"industry_click_rate": report.IndustryStats.ClickRate,
"industry_bounce_rate": report.IndustryStats.BounceRate,
"industry_unopen_rate": report.IndustryStats.UnopenRate,
"industry_unsub_rate": report.IndustryStats.UnsubRate,
"industry_abuse_rate": report.IndustryStats.AbuseRate,
"list_stats_sub_rate": report.ListStats.SubRate,
"list_stats_unsub_rate": report.ListStats.UnsubRate,
"list_stats_open_rate": report.ListStats.OpenRate,
"list_stats_click_rate": report.ListStats.ClickRate,
}
acc.AddFields("mailchimp", fields, tags, now)
}

func init() {
@@ -137,16 +137,18 @@ func (m *Memcached) gatherServer(
tags := map[string]string{"server": address}

// Process values
fields := make(map[string]interface{})
for _, key := range sendMetrics {
if value, ok := values[key]; ok {
// Mostly it is the number
if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse != nil {
acc.Add(key, value, tags)
fields[key] = iValue
} else {
acc.Add(key, iValue, tags)
fields[key] = value
}
}
}
acc.AddFields("memcached", fields, tags)
return nil
}
@@ -98,7 +98,8 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error {
}
dialInfo, err := mgo.ParseURL(dialAddrs[0])
if err != nil {
return fmt.Errorf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error())
return fmt.Errorf("Unable to parse URL (%s), %s\n",
dialAddrs[0], err.Error())
}
dialInfo.Direct = true
dialInfo.Timeout = time.Duration(10) * time.Second
@@ -10,6 +10,7 @@ import (

type MongodbData struct {
StatLine *StatLine
Fields map[string]interface{}
Tags map[string]string
}

@@ -20,6 +21,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
return &MongodbData{
StatLine: statLine,
Tags: tags,
Fields: make(map[string]interface{}),
}
}

@@ -63,38 +65,44 @@ var WiredTigerStats = map[string]string{
"percent_cache_used": "CacheUsedPercent",
}

func (d *MongodbData) AddDefaultStats(acc plugins.Accumulator) {
func (d *MongodbData) AddDefaultStats() {
statLine := reflect.ValueOf(d.StatLine).Elem()
d.addStat(acc, statLine, DefaultStats)
d.addStat(statLine, DefaultStats)
if d.StatLine.NodeType != "" {
d.addStat(acc, statLine, DefaultReplStats)
d.addStat(statLine, DefaultReplStats)
}
if d.StatLine.StorageEngine == "mmapv1" {
d.addStat(acc, statLine, MmapStats)
d.addStat(statLine, MmapStats)
} else if d.StatLine.StorageEngine == "wiredTiger" {
for key, value := range WiredTigerStats {
val := statLine.FieldByName(value).Interface()
percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
floatVal, _ := strconv.ParseFloat(percentVal, 64)
d.add(acc, key, floatVal)
d.add(key, floatVal)
}
}
}

func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, stats map[string]string) {
func (d *MongodbData) addStat(
statLine reflect.Value,
stats map[string]string,
) {
for key, value := range stats {
val := statLine.FieldByName(value).Interface()
d.add(acc, key, val)
d.add(key, val)
}
}

func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) {
func (d *MongodbData) add(key string, val interface{}) {
d.Fields[key] = val
}

func (d *MongodbData) flush(acc plugins.Accumulator) {
acc.AddFields(
key,
map[string]interface{}{
"value": val,
},
"mongodb",
d.Fields,
d.Tags,
d.StatLine.Time,
)
d.Fields = make(map[string]interface{})
}
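MongodbData now stages stats in a Fields map and emits them as one point via an explicit flush. Here is a sketch of that buffer-then-flush shape; the Accumulator interface is trimmed down (the real one lives in github.com/influxdb/telegraf/plugins), and the stat names are hypothetical.

```go
package main

import (
	"fmt"
	"time"
)

// Trimmed-down Accumulator; only AddFields is needed for this sketch.
type Accumulator interface {
	AddFields(name string, fields map[string]interface{},
		tags map[string]string, t ...time.Time)
}

type data struct {
	fields map[string]interface{}
	tags   map[string]string
}

func (d *data) add(key string, val interface{}) { d.fields[key] = val }

func (d *data) flush(acc Accumulator) {
	acc.AddFields("mongodb", d.fields, d.tags, time.Now())
	d.fields = make(map[string]interface{}) // reset for the next sample
}

type printAcc struct{}

func (printAcc) AddFields(n string, f map[string]interface{},
	t map[string]string, ts ...time.Time) {
	fmt.Println(n, f, t)
}

func main() {
	d := &data{
		fields: make(map[string]interface{}),
		tags:   map[string]string{"hostname": "db1"},
	}
	d.add("inserts_per_sec", 12)
	d.add("queries_per_sec", 34)
	d.flush(printAcc{})
}
```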
@@ -44,7 +44,8 @@ func (s *Server) gatherData(acc plugins.Accumulator) error {
NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
s.getDefaultTags(),
)
data.AddDefaultStats(acc)
data.AddDefaultStats()
data.flush(acc)
}
return nil
}
@@ -138,6 +138,8 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
if err != nil {
servtag = "localhost"
}
tags := map[string]string{"server": servtag}
fields := make(map[string]interface{})
for rows.Next() {
var name string
var val interface{}
@@ -149,12 +151,10 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {

var found bool

tags := map[string]string{"server": servtag}

for _, mapped := range mappings {
if strings.HasPrefix(name, mapped.onServer) {
i, _ := strconv.Atoi(string(val.([]byte)))
acc.Add(mapped.inExport+name[len(mapped.onServer):], i, tags)
fields[mapped.inExport+name[len(mapped.onServer):]] = i
found = true
}
}
@@ -170,16 +170,17 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
return err
}

acc.Add("queries", i, tags)
fields["queries"] = i
case "Slow_queries":
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
if err != nil {
return err
}

acc.Add("slow_queries", i, tags)
fields["slow_queries"] = i
}
}
acc.AddFields("mysql", fields, tags)

conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")

@@ -193,11 +194,13 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
}

tags := map[string]string{"server": servtag, "user": user}
fields := make(map[string]interface{})

if err != nil {
return err
}
acc.Add("connections", connections, tags)
fields["connections"] = connections
acc.AddFields("mysql_users", fields, tags)
}

return nil
@@ -127,14 +127,16 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
}

tags := getTags(addr)

acc.Add("active", active, tags)
acc.Add("accepts", accepts, tags)
acc.Add("handled", handled, tags)
acc.Add("requests", requests, tags)
acc.Add("reading", reading, tags)
acc.Add("writing", writing, tags)
acc.Add("waiting", waiting, tags)
fields := map[string]interface{}{
"active": active,
"accepts": accepts,
"handled": handled,
"requests": requests,
"reading": reading,
"writing": writing,
"waiting": waiting,
}
acc.AddFields("nginx", fields, tags)

return nil
}
@@ -198,9 +198,11 @@ func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat,
"url": host,
"pool": pool,
}
fields := make(map[string]interface{})
for k, v := range stats[pool] {
acc.Add(strings.Replace(k, " ", "_", -1), v, tags)
fields[strings.Replace(k, " ", "_", -1)] = v
}
acc.AddFields("phpfpm", fields, tags)
}

return stats, nil
@@ -82,10 +82,13 @@ func (p *Ping) Gather(acc plugins.Accumulator) error {
}
// Calculate packet loss percentage
loss := float64(trans-rec) / float64(trans) * 100.0
acc.Add("packets_transmitted", trans, tags)
acc.Add("packets_received", rec, tags)
acc.Add("percent_packet_loss", loss, tags)
acc.Add("average_response_ms", avg, tags)
fields := map[string]interface{}{
"packets_transmitted": trans,
"packets_received": rec,
"percent_packet_loss": loss,
"average_response_ms": avg,
}
acc.AddFields("ping", fields, tags)
}(url, acc)
}
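For reference, a worked instance of the packet-loss arithmetic in that hunk, with made-up counts:

```go
package main

import "fmt"

// loss% = (transmitted - received) / transmitted * 100
func main() {
	trans, rec := 5, 4 // hypothetical ping run: 5 sent, 4 answered
	loss := float64(trans-rec) / float64(trans) * 100.0
	fmt.Printf("percent_packet_loss = %.1f\n", loss) // 20.0
}
```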
@@ -11,46 +11,32 @@ import (
_ "github.com/lib/pq"
)

type Server struct {
type Postgresql struct {
Address string
Databases []string
OrderedColumns []string
}

type Postgresql struct {
Servers []*Server
}

var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}

var sampleConfig = `
# specify servers via an array of tables
[[plugins.postgresql.servers]]

# specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
# host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
# All connection parameters are optional.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#

address = "sslmode=disable"
address = "host=localhost user=postgres sslmode=disable"

# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.

# databases = ["app_production", "blah_testing"]

# [[plugins.postgresql.servers]]
# address = "influx@remoteserver"
# databases = ["app_production", "testing"]
`

func (p *Postgresql) SampleConfig() string {
@@ -65,42 +51,27 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
return ignoredColumns
}

var localhost = &Server{Address: "sslmode=disable"}
var localhost = "host=localhost sslmode=disable"

func (p *Postgresql) Gather(acc plugins.Accumulator) error {
if len(p.Servers) == 0 {
p.gatherServer(localhost, acc)
return nil
}

for _, serv := range p.Servers {
err := p.gatherServer(serv, acc)
if err != nil {
return err
}
}

return nil
}

func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error {
var query string

if serv.Address == "" || serv.Address == "localhost" {
serv = localhost
if p.Address == "" || p.Address == "localhost" {
p.Address = localhost
}

db, err := sql.Open("postgres", serv.Address)
db, err := sql.Open("postgres", p.Address)
if err != nil {
return err
}

defer db.Close()

if len(serv.Databases) == 0 {
if len(p.Databases) == 0 {
query = `SELECT * FROM pg_stat_database`
} else {
query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(serv.Databases, "','"))
query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
strings.Join(p.Databases, "','"))
}

rows, err := db.Query(query)
@@ -111,13 +82,13 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error {
defer rows.Close()

// grab the column information from the result
serv.OrderedColumns, err = rows.Columns()
p.OrderedColumns, err = rows.Columns()
if err != nil {
return err
}

for rows.Next() {
err = p.accRow(rows, acc, serv)
err = p.accRow(rows, acc)
if err != nil {
return err
}
@@ -130,20 +101,20 @@ type scanner interface {
Scan(dest ...interface{}) error
}

func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) error {
func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error {
var columnVars []interface{}
var dbname bytes.Buffer

// this is where we'll store the column name with its *interface{}
columnMap := make(map[string]*interface{})

for _, column := range serv.OrderedColumns {
for _, column := range p.OrderedColumns {
columnMap[column] = new(interface{})
}

// populate the array of interface{} with the pointers in the right order
for i := 0; i < len(columnMap); i++ {
columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]])
columnVars = append(columnVars, columnMap[p.OrderedColumns[i]])
}

// deconstruct array of variables and send to Scan
@@ -159,14 +130,16 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server)
dbname.WriteString(string(dbnameChars[i]))
}

tags := map[string]string{"server": serv.Address, "db": dbname.String()}
tags := map[string]string{"server": p.Address, "db": dbname.String()}

fields := make(map[string]interface{})
for col, val := range columnMap {
_, ignore := ignoredColumns[col]
if !ignore {
acc.Add(col, *val, tags)
fields[col] = *val
}
}
acc.AddFields("postgresql", fields, tags)

return nil
}
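The accRow change keeps postgresql's column-scanning trick: allocate one `*interface{}` per column, hand the pointers to Scan in column order, then read the results back through the map. A self-contained sketch under stated assumptions follows; `fakeRow` stands in for `*sql.Rows`, and the column names and values are example data.

```go
package main

import "fmt"

type scanner interface {
	Scan(dest ...interface{}) error
}

// fakeRow mimics Scan filling each destination pointer in order.
type fakeRow struct{ vals []interface{} }

func (r fakeRow) Scan(dest ...interface{}) error {
	for i := range dest {
		*dest[i].(*interface{}) = r.vals[i]
	}
	return nil
}

func main() {
	cols := []string{"datname", "numbackends", "xact_commit"}

	// One *interface{} per column, then the same pointers in column order.
	columnMap := make(map[string]*interface{})
	var columnVars []interface{}
	for _, c := range cols {
		columnMap[c] = new(interface{})
	}
	for _, c := range cols {
		columnVars = append(columnVars, columnMap[c])
	}

	row := fakeRow{vals: []interface{}{"app_production", int64(3), int64(42)}}
	if err := row.Scan(columnVars...); err != nil {
		panic(err)
	}

	fields := make(map[string]interface{})
	for col, val := range columnMap {
		if col != "datname" { // skip an ignored column, as the plugin does
			fields[col] = *val
		}
	}
	fmt.Println(fields) // map[numbackends:3 xact_commit:42]
}
```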
@@ -7,22 +7,17 @@ import (
"os/exec"
"strconv"
"strings"
"sync"

"github.com/shirou/gopsutil/process"

"github.com/influxdb/telegraf/plugins"
)

type Specification struct {
type Procstat struct {
PidFile string `toml:"pid_file"`
Exe string
Prefix string
Pattern string
}

type Procstat struct {
Specifications []*Specification
Prefix string
}

func NewProcstat() *Procstat {
@@ -30,8 +25,6 @@ func NewProcstat() *Procstat {
}

var sampleConfig = `
[[plugins.procstat.specifications]]
prefix = "" # optional string to prefix measurements
# Must specify one of: pid_file, exe, or pattern
# PID file to monitor process
pid_file = "/var/run/nginx.pid"
@@ -39,6 +32,9 @@ var sampleConfig = `
# exe = "nginx"
# pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"

# Field name prefix
prefix = ""
`

func (_ *Procstat) SampleConfig() string {
@@ -50,35 +46,26 @@ func (_ *Procstat) Description() string {
}

func (p *Procstat) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup

for _, specification := range p.Specifications {
wg.Add(1)
go func(spec *Specification, acc plugins.Accumulator) {
defer wg.Done()
procs, err := spec.createProcesses()
if err != nil {
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
spec.Exe, spec.PidFile, spec.Pattern, err.Error())
} else {
for _, proc := range procs {
p := NewSpecProcessor(spec.Prefix, acc, proc)
p.pushMetrics()
}
}
}(specification, acc)
procs, err := p.createProcesses()
if err != nil {
log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
p.Exe, p.PidFile, p.Pattern, err.Error())
} else {
for _, proc := range procs {
p := NewSpecProcessor(p.Prefix, acc, proc)
p.pushMetrics()
}
}
wg.Wait()

return nil
}

func (spec *Specification) createProcesses() ([]*process.Process, error) {
func (p *Procstat) createProcesses() ([]*process.Process, error) {
var out []*process.Process
var errstring string
var outerr error

pids, err := spec.getAllPids()
pids, err := p.getAllPids()
if err != nil {
errstring += err.Error() + " "
}
@@ -99,16 +86,16 @@ func (spec *Specification) createProcesses() ([]*process.Process, error) {
return out, outerr
}

func (spec *Specification) getAllPids() ([]int32, error) {
func (p *Procstat) getAllPids() ([]int32, error) {
var pids []int32
var err error

if spec.PidFile != "" {
pids, err = pidsFromFile(spec.PidFile)
} else if spec.Exe != "" {
pids, err = pidsFromExe(spec.Exe)
} else if spec.Pattern != "" {
pids, err = pidsFromPattern(spec.Pattern)
if p.PidFile != "" {
pids, err = pidsFromFile(p.PidFile)
} else if p.Exe != "" {
pids, err = pidsFromExe(p.Exe)
} else if p.Pattern != "" {
pids, err = pidsFromPattern(p.Pattern)
} else {
err = fmt.Errorf("Either exe, pid_file or pattern has to be specified")
}
@@ -12,6 +12,7 @@ import (
type SpecProcessor struct {
Prefix string
tags map[string]string
fields map[string]interface{}
acc plugins.Accumulator
proc *process.Process
}
@@ -23,7 +24,12 @@ func (p *SpecProcessor) add(metric string, value interface{}) {
} else {
mname = p.Prefix + "_" + metric
}
p.acc.Add(mname, value, p.tags)
p.fields[mname] = value
}

func (p *SpecProcessor) flush() {
p.acc.AddFields("procstat", p.fields, p.tags)
p.fields = make(map[string]interface{})
}

func NewSpecProcessor(
@@ -39,6 +45,7 @@ func NewSpecProcessor(
return &SpecProcessor{
Prefix: prefix,
tags: tags,
fields: make(map[string]interface{}),
acc: acc,
proc: p,
}
@@ -60,6 +67,7 @@ func (p *SpecProcessor) pushMetrics() {
if err := p.pushMemoryStats(); err != nil {
log.Printf("procstat, mem stats not available: %s", err.Error())
}
p.flush()
}

func (p *SpecProcessor) pushFDStats() error {
@@ -94,21 +102,22 @@ func (p *SpecProcessor) pushIOStats() error {
}

func (p *SpecProcessor) pushCPUStats() error {
cpu, err := p.proc.CPUTimes()
cpu_time, err := p.proc.CPUTimes()
if err != nil {
return err
}
p.add("cpu_user", cpu.User)
p.add("cpu_system", cpu.System)
p.add("cpu_idle", cpu.Idle)
p.add("cpu_nice", cpu.Nice)
p.add("cpu_iowait", cpu.Iowait)
p.add("cpu_irq", cpu.Irq)
p.add("cpu_soft_irq", cpu.Softirq)
p.add("cpu_soft_steal", cpu.Steal)
p.add("cpu_soft_stolen", cpu.Stolen)
p.add("cpu_soft_guest", cpu.Guest)
p.add("cpu_soft_guest_nice", cpu.GuestNice)
p.add("cpu_time_user", cpu_time.User)
p.add("cpu_time_system", cpu_time.System)
p.add("cpu_time_idle", cpu_time.Idle)
p.add("cpu_time_nice", cpu_time.Nice)
p.add("cpu_time_iowait", cpu_time.Iowait)
p.add("cpu_time_irq", cpu_time.Irq)
p.add("cpu_time_soft_irq", cpu_time.Softirq)
p.add("cpu_time_soft_steal", cpu_time.Steal)
p.add("cpu_time_soft_stolen", cpu_time.Stolen)
p.add("cpu_time_soft_guest", cpu_time.Guest)
p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice)

return nil
}
@@ -80,14 +80,14 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error {
return fmt.Errorf("error getting processing samples for %s: %s", url, err)
}
for _, sample := range samples {
tags := map[string]string{}
tags := make(map[string]string)
for key, value := range sample.Metric {
if key == model.MetricNameLabel {
continue
}
tags[string(key)] = string(value)
}
acc.Add(string(sample.Metric[model.MetricNameLabel]),
acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]),
float64(sample.Value), tags)
}
}
@@ -104,15 +104,16 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error {
return fmt.Errorf("%s", err)
}

structPrinter(&puppetState, acc)
tags := map[string]string{"location": pa.Location}
structPrinter(&puppetState, acc, tags)

return nil
}

func structPrinter(s *State, acc plugins.Accumulator) {

func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) {
e := reflect.ValueOf(s).Elem()

fields := make(map[string]interface{})
for tLevelFNum := 0; tLevelFNum < e.NumField(); tLevelFNum++ {
name := e.Type().Field(tLevelFNum).Name
nameNumField := e.FieldByName(name).NumField()
@@ -123,10 +124,10 @@ func structPrinter(s *State, acc plugins.Accumulator) {

lname := strings.ToLower(name)
lsName := strings.ToLower(sName)
acc.Add(fmt.Sprintf("%s_%s", lname, lsName), sValue, nil)
fields[fmt.Sprintf("%s_%s", lname, lsName)] = sValue
}
}

acc.AddFields("puppetagent", fields, tags)
}

func init() {
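structPrinter walks a two-level struct by reflection and builds `<section>_<field>` keys. A minimal sketch of that walk follows; the `State`/`Resources` layout here is a made-up two-level example, not the plugin's real schema.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Resources struct {
	Changed int64
	Failed  int64
}

type State struct {
	Resources Resources
}

func main() {
	s := &State{Resources: Resources{Changed: 3, Failed: 1}}
	e := reflect.ValueOf(s).Elem()

	fields := make(map[string]interface{})
	// Outer loop: top-level sections; inner loop: their fields.
	for i := 0; i < e.NumField(); i++ {
		name := e.Type().Field(i).Name
		section := e.Field(i)
		for j := 0; j < section.NumField(); j++ {
			sName := section.Type().Field(j).Name
			key := fmt.Sprintf("%s_%s",
				strings.ToLower(name), strings.ToLower(sName))
			fields[key] = section.Field(j).Interface()
		}
	}
	fmt.Println(fields) // map[resources_changed:3 resources_failed:1]
}
```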
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
@ -13,17 +14,13 @@ const DefaultUsername = "guest"
|
|||
const DefaultPassword = "guest"
|
||||
const DefaultURL = "http://localhost:15672"
|
||||
|
||||
type Server struct {
|
||||
type RabbitMQ struct {
|
||||
URL string
|
||||
Name string
|
||||
Username string
|
||||
Password string
|
||||
Nodes []string
|
||||
Queues []string
|
||||
}
|
||||
|
||||
type RabbitMQ struct {
|
||||
Servers []*Server
|
||||
|
||||
Client *http.Client
|
||||
}
|
||||
|
@ -94,15 +91,13 @@ type Node struct {
|
|||
SocketsUsed int64 `json:"sockets_used"`
|
||||
}
|
||||
|
||||
type gatherFunc func(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error)
|
||||
type gatherFunc func(r *RabbitMQ, acc plugins.Accumulator, errChan chan error)
|
||||
|
||||
var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues}
|
||||
|
||||
var sampleConfig = `
|
||||
# Specify servers via an array of tables
|
||||
[[plugins.rabbitmq.servers]]
|
||||
url = "http://localhost:15672" # required
|
||||
# name = "rmq-server-1" # optional tag
|
||||
# url = "http://localhost:15672"
|
||||
# username = "guest"
|
||||
# password = "guest"
|
||||
|
||||
|
@ -119,27 +114,18 @@ func (r *RabbitMQ) Description() string {
|
|||
return "Read metrics from one or many RabbitMQ servers via the management API"
|
||||
}
|
||||
|
||||
var localhost = &Server{URL: DefaultURL}
|
||||
|
||||
func (r *RabbitMQ) Gather(acc plugins.Accumulator) error {
|
||||
if r.Client == nil {
|
||||
r.Client = &http.Client{}
|
||||
}
|
||||
|
||||
var errChan = make(chan error, len(r.Servers))
|
||||
var errChan = make(chan error, len(gatherFunctions))
|
||||
|
||||
// use localhost is no servers are specified in config
|
||||
if len(r.Servers) == 0 {
|
||||
r.Servers = append(r.Servers, localhost)
|
||||
for _, f := range gatherFunctions {
|
||||
go f(r, acc, errChan)
|
||||
}
|
||||
|
||||
for _, serv := range r.Servers {
|
||||
for _, f := range gatherFunctions {
|
||||
go f(r, serv, acc, errChan)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 1; i <= len(r.Servers)*len(gatherFunctions); i++ {
|
||||
for i := 1; i <= len(gatherFunctions); i++ {
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -149,20 +135,20 @@ func (r *RabbitMQ) Gather(acc plugins.Accumulator) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error {
|
||||
u = fmt.Sprintf("%s%s", serv.URL, u)
|
||||
func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
|
||||
u = fmt.Sprintf("%s%s", r.URL, u)
|
||||
|
||||
req, err := http.NewRequest("GET", u, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
username := serv.Username
|
||||
username := r.Username
|
||||
if username == "" {
|
||||
username = DefaultUsername
|
||||
}
|
||||
|
||||
password := serv.Password
|
||||
password := r.Password
|
||||
if password == "" {
|
||||
password = DefaultPassword
|
||||
}
|
||||
|
@ -181,10 +167,10 @@ func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
|
||||
func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
|
||||
overview := &OverviewResponse{}
|
||||
|
||||
err := r.requestJSON(serv, "/api/overview", &overview)
|
||||
err := r.requestJSON("/api/overview", &overview)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
|
@ -195,76 +181,80 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan
|
|||
return
|
||||
}
|
||||
|
||||
tags := map[string]string{"url": serv.URL}
|
||||
if serv.Name != "" {
|
||||
tags["name"] = serv.Name
|
||||
tags := map[string]string{"url": r.URL}
|
||||
if r.Name != "" {
|
||||
tags["name"] = r.Name
|
||||
}
|
||||
|
||||
acc.Add("messages", overview.QueueTotals.Messages, tags)
|
||||
acc.Add("messages_ready", overview.QueueTotals.MessagesReady, tags)
|
||||
acc.Add("messages_unacked", overview.QueueTotals.MessagesUnacknowledged, tags)
|
||||
|
||||
acc.Add("channels", overview.ObjectTotals.Channels, tags)
|
||||
acc.Add("connections", overview.ObjectTotals.Connections, tags)
|
||||
acc.Add("consumers", overview.ObjectTotals.Consumers, tags)
|
||||
acc.Add("exchanges", overview.ObjectTotals.Exchanges, tags)
|
||||
acc.Add("queues", overview.ObjectTotals.Queues, tags)
|
||||
|
||||
acc.Add("messages_acked", overview.MessageStats.Ack, tags)
|
||||
acc.Add("messages_delivered", overview.MessageStats.Deliver, tags)
|
||||
acc.Add("messages_published", overview.MessageStats.Publish, tags)
|
||||
fields := map[string]interface{}{
|
||||
"messages": overview.QueueTotals.Messages,
|
||||
"messages_ready": overview.QueueTotals.MessagesReady,
|
||||
"messages_unacked": overview.QueueTotals.MessagesUnacknowledged,
|
||||
"channels": overview.ObjectTotals.Channels,
|
||||
"connections": overview.ObjectTotals.Connections,
|
||||
"consumers": overview.ObjectTotals.Consumers,
|
||||
"exchanges": overview.ObjectTotals.Exchanges,
|
||||
"queues": overview.ObjectTotals.Queues,
|
||||
"messages_acked": overview.MessageStats.Ack,
|
||||
"messages_delivered": overview.MessageStats.Deliver,
|
+			"messages_published": overview.MessageStats.Publish,
+		}
+		acc.AddFields("rabbitmq_overview", fields, tags)

 		errChan <- nil
 	}

-func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
+func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
 	nodes := make([]Node, 0)
 	// Gather information about nodes
-	err := r.requestJSON(serv, "/api/nodes", &nodes)
+	err := r.requestJSON("/api/nodes", &nodes)
 	if err != nil {
 		errChan <- err
 		return
 	}
+	now := time.Now()

 	for _, node := range nodes {
-		if !shouldGatherNode(node, serv) {
+		if !r.shouldGatherNode(node) {
 			continue
 		}

-		tags := map[string]string{"url": serv.URL}
+		tags := map[string]string{"url": r.URL}
 		tags["node"] = node.Name

-		acc.Add("disk_free", node.DiskFree, tags)
-		acc.Add("disk_free_limit", node.DiskFreeLimit, tags)
-		acc.Add("fd_total", node.FdTotal, tags)
-		acc.Add("fd_used", node.FdUsed, tags)
-		acc.Add("mem_limit", node.MemLimit, tags)
-		acc.Add("mem_used", node.MemUsed, tags)
-		acc.Add("proc_total", node.ProcTotal, tags)
-		acc.Add("proc_used", node.ProcUsed, tags)
-		acc.Add("run_queue", node.RunQueue, tags)
-		acc.Add("sockets_total", node.SocketsTotal, tags)
-		acc.Add("sockets_used", node.SocketsUsed, tags)
+		fields := map[string]interface{}{
+			"disk_free":       node.DiskFree,
+			"disk_free_limit": node.DiskFreeLimit,
+			"fd_total":        node.FdTotal,
+			"fd_used":         node.FdUsed,
+			"mem_limit":       node.MemLimit,
+			"mem_used":        node.MemUsed,
+			"proc_total":      node.ProcTotal,
+			"proc_used":       node.ProcUsed,
+			"run_queue":       node.RunQueue,
+			"sockets_total":   node.SocketsTotal,
+			"sockets_used":    node.SocketsUsed,
+		}
+		acc.AddFields("rabbitmq_node", fields, tags, now)
 	}

 	errChan <- nil
 }

-func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
+func gatherQueues(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
 	// Gather information about queues
 	queues := make([]Queue, 0)
-	err := r.requestJSON(serv, "/api/queues", &queues)
+	err := r.requestJSON("/api/queues", &queues)
 	if err != nil {
 		errChan <- err
 		return
 	}

 	for _, queue := range queues {
-		if !shouldGatherQueue(queue, serv) {
+		if !r.shouldGatherQueue(queue) {
 			continue
 		}
 		tags := map[string]string{
-			"url":   serv.URL,
+			"url":   r.URL,
 			"queue": queue.Name,
 			"vhost": queue.Vhost,
 			"node":  queue.Node,

@@ -273,7 +263,7 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch
 		}

 		acc.AddFields(
-			"queue",
+			"rabbitmq_queue",
 			map[string]interface{}{
 				// common information
 				"consumers": queue.Consumers,

@@ -301,12 +291,12 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch
 	errChan <- nil
 }

-func shouldGatherNode(node Node, serv *Server) bool {
-	if len(serv.Nodes) == 0 {
+func (r *RabbitMQ) shouldGatherNode(node Node) bool {
+	if len(r.Nodes) == 0 {
 		return true
 	}

-	for _, name := range serv.Nodes {
+	for _, name := range r.Nodes {
 		if name == node.Name {
 			return true
 		}

@@ -315,12 +305,12 @@ func shouldGatherNode(node Node, serv *Server) bool {
 	return false
 }

-func shouldGatherQueue(queue Queue, serv *Server) bool {
-	if len(serv.Queues) == 0 {
+func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool {
+	if len(r.Queues) == 0 {
 		return true
 	}

-	for _, name := range serv.Queues {
+	for _, name := range r.Queues {
 		if name == queue.Name {
 			return true
 		}
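The change repeated across the files in this merge is the v0.3.0 measurement aggregation: per-value `acc.Add(name, value, tags)` calls, each of which produced its own measurement, are folded into a single `acc.AddFields(measurement, fields, tags)` call per point. A minimal sketch of the two shapes, using a simplified stand-in for `plugins.Accumulator` (the real interface has more methods and optional timestamp arguments):

```go
package main

import "fmt"

// Simplified stand-in for telegraf's plugins.Accumulator.
type Accumulator interface {
	Add(name string, value interface{}, tags map[string]string)
	AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

// printAcc just prints what it is given, to make the two shapes visible.
type printAcc struct{}

func (printAcc) Add(name string, value interface{}, tags map[string]string) {
	fmt.Println("point:", name, value, tags)
}

func (printAcc) AddFields(m string, fields map[string]interface{}, tags map[string]string) {
	fmt.Println("point:", m, fields, tags)
}

func main() {
	var acc Accumulator = printAcc{}
	tags := map[string]string{"url": "http://localhost:15672"}

	// Old style: one point (and one measurement name) per value.
	acc.Add("disk_free", 1024, tags)
	acc.Add("mem_used", 512, tags)

	// New style: one measurement carrying all values as fields.
	acc.AddFields("rabbitmq_node", map[string]interface{}{
		"disk_free": 1024,
		"mem_used":  512,
	}, tags)
}
```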
@@ -164,6 +164,7 @@ func gatherInfoOutput(
 	var keyspace_hits, keyspace_misses uint64 = 0, 0

 	scanner := bufio.NewScanner(rdr)
+	fields := make(map[string]interface{})
 	for scanner.Scan() {
 		line := scanner.Text()
 		if strings.Contains(line, "ERR") {

@@ -199,7 +200,7 @@ func gatherInfoOutput(
 		}

 		if err == nil {
-			acc.Add(metric, ival, tags)
+			fields[metric] = ival
 			continue
 		}

@@ -208,13 +209,14 @@ func gatherInfoOutput(
 			return err
 		}

-		acc.Add(metric, fval, tags)
+		fields[metric] = fval
 	}
 	var keyspace_hitrate float64 = 0.0
 	if keyspace_hits != 0 || keyspace_misses != 0 {
 		keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses)
 	}
-	acc.Add("keyspace_hitrate", keyspace_hitrate, tags)
+	fields["keyspace_hitrate"] = keyspace_hitrate
+	acc.AddFields("redis", fields, tags)
 	return nil
 }

@@ -229,15 +231,17 @@ func gatherKeyspaceLine(
 	tags map[string]string,
 ) {
 	if strings.Contains(line, "keys=") {
+		fields := make(map[string]interface{})
 		tags["database"] = name
 		dbparts := strings.Split(line, ",")
 		for _, dbp := range dbparts {
 			kv := strings.Split(dbp, "=")
 			ival, err := strconv.ParseUint(kv[1], 10, 64)
 			if err == nil {
-				acc.Add(kv[0], ival, tags)
+				fields[kv[0]] = ival
 			}
 		}
		acc.AddFields("redis_keyspace", fields, tags)
 	}
 }
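`gatherInfoOutput` now accumulates every parsed value into `fields` and emits one `redis` point; the hitrate guard keeps a fresh server (zero hits and zero misses) at 0 instead of dividing by zero. The same arithmetic as a standalone sketch:

```go
package main

import "fmt"

// keyspaceHitrate mirrors the guard in gatherInfoOutput: with no
// observed hits or misses the rate is defined as 0, not NaN.
func keyspaceHitrate(hits, misses uint64) float64 {
	if hits == 0 && misses == 0 {
		return 0
	}
	return float64(hits) / float64(hits+misses)
}

func main() {
	fmt.Println(keyspaceHitrate(0, 0))   // 0
	fmt.Println(keyspaceHitrate(75, 25)) // 0.75
}
```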
@@ -86,25 +86,30 @@ var engineStats = map[string]string{
 	"total_writes": "TotalWrites",
 }

-func (e *Engine) AddEngineStats(keys []string, acc plugins.Accumulator, tags map[string]string) {
+func (e *Engine) AddEngineStats(
+	keys []string,
+	acc plugins.Accumulator,
+	tags map[string]string,
+) {
 	engine := reflect.ValueOf(e).Elem()
+	fields := make(map[string]interface{})
 	for _, key := range keys {
-		acc.Add(
-			key,
-			engine.FieldByName(engineStats[key]).Interface(),
-			tags,
-		)
+		fields[key] = engine.FieldByName(engineStats[key]).Interface()
 	}
+	acc.AddFields("rethinkdb_engine", fields, tags)
 }

 func (s *Storage) AddStats(acc plugins.Accumulator, tags map[string]string) {
-	acc.Add("cache_bytes_in_use", s.Cache.BytesInUse, tags)
-	acc.Add("disk_read_bytes_per_sec", s.Disk.ReadBytesPerSec, tags)
-	acc.Add("disk_read_bytes_total", s.Disk.ReadBytesTotal, tags)
-	acc.Add("disk_written_bytes_per_sec", s.Disk.WriteBytesPerSec, tags)
-	acc.Add("disk_written_bytes_total", s.Disk.WriteBytesTotal, tags)
-	acc.Add("disk_usage_data_bytes", s.Disk.SpaceUsage.Data, tags)
-	acc.Add("disk_usage_garbage_bytes", s.Disk.SpaceUsage.Garbage, tags)
-	acc.Add("disk_usage_metadata_bytes", s.Disk.SpaceUsage.Metadata, tags)
-	acc.Add("disk_usage_preallocated_bytes", s.Disk.SpaceUsage.Prealloc, tags)
+	fields := map[string]interface{}{
+		"cache_bytes_in_use":            s.Cache.BytesInUse,
+		"disk_read_bytes_per_sec":       s.Disk.ReadBytesPerSec,
+		"disk_read_bytes_total":         s.Disk.ReadBytesTotal,
+		"disk_written_bytes_per_sec":    s.Disk.WriteBytesPerSec,
+		"disk_written_bytes_total":      s.Disk.WriteBytesTotal,
+		"disk_usage_data_bytes":         s.Disk.SpaceUsage.Data,
+		"disk_usage_garbage_bytes":      s.Disk.SpaceUsage.Garbage,
+		"disk_usage_metadata_bytes":     s.Disk.SpaceUsage.Metadata,
+		"disk_usage_preallocated_bytes": s.Disk.SpaceUsage.Prealloc,
+	}
+	acc.AddFields("rethinkdb", fields, tags)
 }
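`AddEngineStats` resolves stat keys to struct fields through the `engineStats` table and `reflect`, so the new `fields` map can be built in one loop. A self-contained sketch of that lookup, with a hypothetical two-field `Engine` standing in for the real struct:

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical stand-in for the rethinkdb Engine struct.
type Engine struct {
	TotalReads  int64
	TotalWrites int64
}

// Maps the external stat key to the Go field name, as engineStats does.
var engineStats = map[string]string{
	"total_reads":  "TotalReads",
	"total_writes": "TotalWrites",
}

func main() {
	e := &Engine{TotalReads: 42, TotalWrites: 7}
	engine := reflect.ValueOf(e).Elem()

	fields := make(map[string]interface{})
	for key, fieldName := range engineStats {
		fields[key] = engine.FieldByName(fieldName).Interface()
	}
	fmt.Println(fields) // map[total_reads:42 total_writes:7]
}
```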
@@ -2,6 +2,7 @@ package system

 import (
 	"fmt"
+	"time"

 	"github.com/influxdb/telegraf/plugins"
 	"github.com/shirou/gopsutil/cpu"

@@ -31,7 +32,7 @@ var sampleConfig = `
   # Whether to report total system cpu stats or not
   totalcpu = true
   # Comment this line if you want the raw CPU time metrics
-  drop = ["cpu_time*"]
+  drop = ["time_*"]
 `

 func (_ *CPUStats) SampleConfig() string {

@@ -43,6 +44,7 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {
 	if err != nil {
 		return fmt.Errorf("error getting CPU info: %s", err)
 	}
+	now := time.Now()

 	for i, cts := range times {
 		tags := map[string]string{

@@ -51,21 +53,24 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {

 		total := totalCpuTime(cts)

-		// Add total cpu numbers
-		add(acc, "time_user", cts.User, tags)
-		add(acc, "time_system", cts.System, tags)
-		add(acc, "time_idle", cts.Idle, tags)
-		add(acc, "time_nice", cts.Nice, tags)
-		add(acc, "time_iowait", cts.Iowait, tags)
-		add(acc, "time_irq", cts.Irq, tags)
-		add(acc, "time_softirq", cts.Softirq, tags)
-		add(acc, "time_steal", cts.Steal, tags)
-		add(acc, "time_guest", cts.Guest, tags)
-		add(acc, "time_guest_nice", cts.GuestNice, tags)
+		// Add cpu time metrics
+		fields := map[string]interface{}{
+			"time_user":       cts.User,
+			"time_system":     cts.System,
+			"time_idle":       cts.Idle,
+			"time_nice":       cts.Nice,
+			"time_iowait":     cts.Iowait,
+			"time_irq":        cts.Irq,
+			"time_softirq":    cts.Softirq,
+			"time_steal":      cts.Steal,
+			"time_guest":      cts.Guest,
+			"time_guest_nice": cts.GuestNice,
+		}

 		// Add in percentage
 		if len(s.lastStats) == 0 {
-			// If it's the 1st gather, can't get CPU stats yet
+			acc.AddFields("cpu", fields, tags, now)
+			// If it's the 1st gather, can't get CPU Usage stats yet
 			continue
 		}
 		lastCts := s.lastStats[i]

@@ -81,17 +86,17 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {
 			continue
 		}

-		add(acc, "usage_user", 100*(cts.User-lastCts.User)/totalDelta, tags)
-		add(acc, "usage_system", 100*(cts.System-lastCts.System)/totalDelta, tags)
-		add(acc, "usage_idle", 100*(cts.Idle-lastCts.Idle)/totalDelta, tags)
-		add(acc, "usage_nice", 100*(cts.Nice-lastCts.Nice)/totalDelta, tags)
-		add(acc, "usage_iowait", 100*(cts.Iowait-lastCts.Iowait)/totalDelta, tags)
-		add(acc, "usage_irq", 100*(cts.Irq-lastCts.Irq)/totalDelta, tags)
-		add(acc, "usage_softirq", 100*(cts.Softirq-lastCts.Softirq)/totalDelta, tags)
-		add(acc, "usage_steal", 100*(cts.Steal-lastCts.Steal)/totalDelta, tags)
-		add(acc, "usage_guest", 100*(cts.Guest-lastCts.Guest)/totalDelta, tags)
-		add(acc, "usage_guest_nice", 100*(cts.GuestNice-lastCts.GuestNice)/totalDelta, tags)
+
+		fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta
+		fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta
+		fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta
+		fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta
+		fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta
+		fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta
+		fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta
+		fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta
+		fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta
+		fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta
+		acc.AddFields("cpu", fields, tags, now)
 	}

 	s.lastStats = times
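The `usage_*` fields are deltas between two gathers, scaled by the change in total CPU time, so they only appear from the second gather onward. With the fixture values used in the tests below (user time 3.1 → 11.4 while the total grows by exactly 100), `usage_user` comes out to 100 × (11.4 − 3.1) / 100 = 8.3. The same arithmetic as a sketch:

```go
package main

import "fmt"

// usagePercent mirrors the Gather calculation: the share of the total
// CPU-time delta spent in one state since the previous gather.
func usagePercent(cur, last, totalDelta float64) float64 {
	return 100 * (cur - last) / totalDelta
}

func main() {
	// Values from the test fixtures: user time 3.1 -> 11.4,
	// total CPU time increases by exactly 100.
	fmt.Println(usagePercent(11.4, 3.1, 100)) // 8.3 (within float error)
}
```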
@@ -0,0 +1,106 @@
+package system
+
+import (
+	"testing"
+
+	"github.com/influxdb/telegraf/testutil"
+	"github.com/shirou/gopsutil/cpu"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCPUStats(t *testing.T) {
+	var mps MockPS
+	defer mps.AssertExpectations(t)
+	var acc testutil.Accumulator
+
+	cts := cpu.CPUTimesStat{
+		CPU:       "cpu0",
+		User:      3.1,
+		System:    8.2,
+		Idle:      80.1,
+		Nice:      1.3,
+		Iowait:    0.2,
+		Irq:       0.1,
+		Softirq:   0.11,
+		Steal:     0.0511,
+		Guest:     8.1,
+		GuestNice: 0.324,
+	}
+
+	cts2 := cpu.CPUTimesStat{
+		CPU:       "cpu0",
+		User:      11.4,     // increased by 8.3
+		System:    10.9,     // increased by 2.7
+		Idle:      158.8699, // increased by 78.7699 (for total increase of 100)
+		Nice:      2.5,      // increased by 1.2
+		Iowait:    0.7,      // increased by 0.5
+		Irq:       1.2,      // increased by 1.1
+		Softirq:   0.31,     // increased by 0.2
+		Steal:     0.2812,   // increased by 0.2301
+		Guest:     12.9,     // increased by 4.8
+		GuestNice: 2.524,    // increased by 2.2
+	}
+
+	mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)
+
+	cs := NewCPUStats(&mps)
+
+	cputags := map[string]string{
+		"cpu": "cpu0",
+	}
+
+	err := cs.Gather(&acc)
+	require.NoError(t, err)
+	numCPUPoints := len(acc.Points)
+
+	expectedCPUPoints := 10
+	assert.Equal(t, expectedCPUPoints, numCPUPoints)
+
+	// Computed values are checked with delta > 0 because of floating point arithmetic
+	// imprecision
+	assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags)
+
+	mps2 := MockPS{}
+	mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
+	cs.ps = &mps2
+
+	// Should have added cpu percentages too
+	err = cs.Gather(&acc)
+	require.NoError(t, err)
+
+	numCPUPoints = len(acc.Points) - numCPUPoints
+	expectedCPUPoints = 20
+	assert.Equal(t, expectedCPUPoints, numCPUPoints)
+
+	assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags)
+	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags)
+
+	assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags)
+	assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags)
+}
@@ -50,12 +50,15 @@ func (s *DiskStats) Gather(acc plugins.Accumulator) error {
 			"path":   du.Path,
 			"fstype": du.Fstype,
 		}
-		acc.Add("total", du.Total, tags)
-		acc.Add("free", du.Free, tags)
-		acc.Add("used", du.Total-du.Free, tags)
-		acc.Add("inodes_total", du.InodesTotal, tags)
-		acc.Add("inodes_free", du.InodesFree, tags)
-		acc.Add("inodes_used", du.InodesTotal-du.InodesFree, tags)
+		fields := map[string]interface{}{
+			"total":        du.Total,
+			"free":         du.Free,
+			"used":         du.Total - du.Free,
+			"inodes_total": du.InodesTotal,
+			"inodes_free":  du.InodesFree,
+			"inodes_used":  du.InodesTotal - du.InodesFree,
+		}
+		acc.AddFields("disk", fields, tags)
 	}

 	return nil

@@ -115,13 +118,16 @@ func (s *DiskIOStats) Gather(acc plugins.Accumulator) error {
 			}
 		}

-		acc.Add("reads", io.ReadCount, tags)
-		acc.Add("writes", io.WriteCount, tags)
-		acc.Add("read_bytes", io.ReadBytes, tags)
-		acc.Add("write_bytes", io.WriteBytes, tags)
-		acc.Add("read_time", io.ReadTime, tags)
-		acc.Add("write_time", io.WriteTime, tags)
-		acc.Add("io_time", io.IoTime, tags)
+		fields := map[string]interface{}{
+			"reads":       io.ReadCount,
+			"writes":      io.WriteCount,
+			"read_bytes":  io.ReadBytes,
+			"write_bytes": io.WriteBytes,
+			"read_time":   io.ReadTime,
+			"write_time":  io.WriteTime,
+			"io_time":     io.IoTime,
+		}
+		acc.AddFields("diskio", fields, tags)
 	}

 	return nil

@@ -132,7 +138,7 @@ func init() {
 		return &DiskStats{ps: &systemPS{}}
 	})

-	plugins.Add("io", func() plugins.Plugin {
+	plugins.Add("diskio", func() plugins.Plugin {
 		return &DiskIOStats{ps: &systemPS{}}
 	})
 }
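The `init` function registers a factory under the plugin's configuration name, which is what makes the `io` → `diskio` rename a breaking change: a config still referencing the old name no longer resolves to a plugin. A minimal sketch of this registry pattern, assuming a plain package-level map rather than telegraf's actual internals:

```go
package main

import "fmt"

type Plugin interface{ Description() string }

type DiskIOStats struct{}

func (*DiskIOStats) Description() string { return "disk IO counters" }

// Hypothetical registry; telegraf's plugins.Add works along these lines.
var registry = map[string]func() Plugin{}

func Add(name string, factory func() Plugin) { registry[name] = factory }

func main() {
	Add("diskio", func() Plugin { return &DiskIOStats{} })

	if factory, ok := registry["diskio"]; ok {
		fmt.Println(factory().Description())
	}
	if _, ok := registry["io"]; !ok {
		fmt.Println("old name no longer registered")
	}
}
```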
@@ -0,0 +1,161 @@
+package system
+
+import (
+	"testing"
+
+	"github.com/influxdb/telegraf/testutil"
+	"github.com/shirou/gopsutil/disk"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDiskStats(t *testing.T) {
+	var mps MockPS
+	defer mps.AssertExpectations(t)
+	var acc testutil.Accumulator
+	var err error
+
+	du := []*disk.DiskUsageStat{
+		{
+			Path:        "/",
+			Fstype:      "ext4",
+			Total:       128,
+			Free:        23,
+			InodesTotal: 1234,
+			InodesFree:  234,
+		},
+		{
+			Path:        "/home",
+			Fstype:      "ext4",
+			Total:       256,
+			Free:        46,
+			InodesTotal: 2468,
+			InodesFree:  468,
+		},
+	}
+
+	mps.On("DiskUsage").Return(du, nil)
+
+	err = (&DiskStats{ps: &mps}).Gather(&acc)
+	require.NoError(t, err)
+
+	numDiskPoints := len(acc.Points)
+	expectedAllDiskPoints := 12
+	assert.Equal(t, expectedAllDiskPoints, numDiskPoints)
+
+	tags1 := map[string]string{
+		"path":   "/",
+		"fstype": "ext4",
+	}
+	tags2 := map[string]string{
+		"path":   "/home",
+		"fstype": "ext4",
+	}
+
+	assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1))
+	assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1))
+	assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1))
+	assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1))
+	assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1))
+	assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1))
+	assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2))
+	assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2))
+	assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2))
+	assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2))
+	assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2))
+	assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2))
+
+	// We expect 6 more DiskPoints to show up with an explicit match on "/"
+	// and /home not matching the /dev in Mountpoints
+	err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
+	assert.Equal(t, expectedAllDiskPoints+6, len(acc.Points))
+
+	// We should see all the diskpoints as Mountpoints includes both
+	// / and /home
+	err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
+	assert.Equal(t, 2*expectedAllDiskPoints+6, len(acc.Points))
+
+}
+
+func TestDiskIOStats(t *testing.T) {
+	var mps MockPS
+	defer mps.AssertExpectations(t)
+	var acc testutil.Accumulator
+	var err error
+
+	diskio1 := disk.DiskIOCountersStat{
+
+		ReadCount:    888,
+		WriteCount:   5341,
+		ReadBytes:    100000,
+		WriteBytes:   200000,
+		ReadTime:     7123,
+		WriteTime:    9087,
+		Name:         "sda1",
+		IoTime:       123552,
+		SerialNumber: "ab-123-ad",
+	}
+	diskio2 := disk.DiskIOCountersStat{
+		ReadCount:    444,
+		WriteCount:   2341,
+		ReadBytes:    200000,
+		WriteBytes:   400000,
+		ReadTime:     3123,
+		WriteTime:    6087,
+		Name:         "sdb1",
+		IoTime:       246552,
+		SerialNumber: "bb-123-ad",
+	}
+
+	mps.On("DiskIO").Return(
+		map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2},
+		nil)
+
+	err = (&DiskIOStats{ps: &mps}).Gather(&acc)
+	require.NoError(t, err)
+
+	numDiskIOPoints := len(acc.Points)
+	expectedAllDiskIOPoints := 14
+	assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints)
+
+	dtags1 := map[string]string{
+		"name":   "sda1",
+		"serial": "ab-123-ad",
+	}
+	dtags2 := map[string]string{
+		"name":   "sdb1",
+		"serial": "bb-123-ad",
+	}
+
+	assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1))
+	assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1))
+	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1))
+	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1))
+	assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1))
+	assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1))
+	assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1))
+	assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2))
+	assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2))
+	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2))
+	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2))
+	assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2))
+	assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
+	assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))
+
+	// We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1"
+	// and serial should be missing from the tags with SkipSerialNumber set
+	err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
+	assert.Equal(t, expectedAllDiskIOPoints+7, len(acc.Points))
+
+	dtags3 := map[string]string{
+		"name": "sdb1",
+	}
+
+	assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3))
+	assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3))
+	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3))
+	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3))
+	assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3))
+	assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3))
+	assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3))
+}
@@ -36,44 +36,47 @@ func (s *DockerStats) Gather(acc plugins.Accumulator) error {

 		cts := cont.CPU

-		acc.Add("user", cts.User, tags)
-		acc.Add("system", cts.System, tags)
-		acc.Add("idle", cts.Idle, tags)
-		acc.Add("nice", cts.Nice, tags)
-		acc.Add("iowait", cts.Iowait, tags)
-		acc.Add("irq", cts.Irq, tags)
-		acc.Add("softirq", cts.Softirq, tags)
-		acc.Add("steal", cts.Steal, tags)
-		acc.Add("guest", cts.Guest, tags)
-		acc.Add("guest_nice", cts.GuestNice, tags)
+		fields := map[string]interface{}{
+			"user":       cts.User,
+			"system":     cts.System,
+			"idle":       cts.Idle,
+			"nice":       cts.Nice,
+			"iowait":     cts.Iowait,
+			"irq":        cts.Irq,
+			"softirq":    cts.Softirq,
+			"steal":      cts.Steal,
+			"guest":      cts.Guest,
+			"guest_nice": cts.GuestNice,

-		acc.Add("cache", cont.Mem.Cache, tags)
-		acc.Add("rss", cont.Mem.RSS, tags)
-		acc.Add("rss_huge", cont.Mem.RSSHuge, tags)
-		acc.Add("mapped_file", cont.Mem.MappedFile, tags)
-		acc.Add("swap_in", cont.Mem.Pgpgin, tags)
-		acc.Add("swap_out", cont.Mem.Pgpgout, tags)
-		acc.Add("page_fault", cont.Mem.Pgfault, tags)
-		acc.Add("page_major_fault", cont.Mem.Pgmajfault, tags)
-		acc.Add("inactive_anon", cont.Mem.InactiveAnon, tags)
-		acc.Add("active_anon", cont.Mem.ActiveAnon, tags)
-		acc.Add("inactive_file", cont.Mem.InactiveFile, tags)
-		acc.Add("active_file", cont.Mem.ActiveFile, tags)
-		acc.Add("unevictable", cont.Mem.Unevictable, tags)
-		acc.Add("memory_limit", cont.Mem.HierarchicalMemoryLimit, tags)
-		acc.Add("total_cache", cont.Mem.TotalCache, tags)
-		acc.Add("total_rss", cont.Mem.TotalRSS, tags)
-		acc.Add("total_rss_huge", cont.Mem.TotalRSSHuge, tags)
-		acc.Add("total_mapped_file", cont.Mem.TotalMappedFile, tags)
-		acc.Add("total_swap_in", cont.Mem.TotalPgpgIn, tags)
-		acc.Add("total_swap_out", cont.Mem.TotalPgpgOut, tags)
-		acc.Add("total_page_fault", cont.Mem.TotalPgFault, tags)
-		acc.Add("total_page_major_fault", cont.Mem.TotalPgMajFault, tags)
-		acc.Add("total_inactive_anon", cont.Mem.TotalInactiveAnon, tags)
-		acc.Add("total_active_anon", cont.Mem.TotalActiveAnon, tags)
-		acc.Add("total_inactive_file", cont.Mem.TotalInactiveFile, tags)
-		acc.Add("total_active_file", cont.Mem.TotalActiveFile, tags)
-		acc.Add("total_unevictable", cont.Mem.TotalUnevictable, tags)
+			"cache":                  cont.Mem.Cache,
+			"rss":                    cont.Mem.RSS,
+			"rss_huge":               cont.Mem.RSSHuge,
+			"mapped_file":            cont.Mem.MappedFile,
+			"swap_in":                cont.Mem.Pgpgin,
+			"swap_out":               cont.Mem.Pgpgout,
+			"page_fault":             cont.Mem.Pgfault,
+			"page_major_fault":       cont.Mem.Pgmajfault,
+			"inactive_anon":          cont.Mem.InactiveAnon,
+			"active_anon":            cont.Mem.ActiveAnon,
+			"inactive_file":          cont.Mem.InactiveFile,
+			"active_file":            cont.Mem.ActiveFile,
+			"unevictable":            cont.Mem.Unevictable,
+			"memory_limit":           cont.Mem.HierarchicalMemoryLimit,
+			"total_cache":            cont.Mem.TotalCache,
+			"total_rss":              cont.Mem.TotalRSS,
+			"total_rss_huge":         cont.Mem.TotalRSSHuge,
+			"total_mapped_file":      cont.Mem.TotalMappedFile,
+			"total_swap_in":          cont.Mem.TotalPgpgIn,
+			"total_swap_out":         cont.Mem.TotalPgpgOut,
+			"total_page_fault":       cont.Mem.TotalPgFault,
+			"total_page_major_fault": cont.Mem.TotalPgMajFault,
+			"total_inactive_anon":    cont.Mem.TotalInactiveAnon,
+			"total_active_anon":      cont.Mem.TotalActiveAnon,
+			"total_inactive_file":    cont.Mem.TotalInactiveFile,
+			"total_active_file":      cont.Mem.TotalActiveFile,
+			"total_unevictable":      cont.Mem.TotalUnevictable,
+		}
+		acc.AddFields("docker", fields, tags)
 	}

 	return nil
@@ -22,18 +22,17 @@ func (s *MemStats) Gather(acc plugins.Accumulator) error {
 		return fmt.Errorf("error getting virtual memory info: %s", err)
 	}

-	vmtags := map[string]string(nil)
-
-	acc.Add("total", vm.Total, vmtags)
-	acc.Add("available", vm.Available, vmtags)
-	acc.Add("used", vm.Used, vmtags)
-	acc.Add("free", vm.Free, vmtags)
-	acc.Add("cached", vm.Cached, vmtags)
-	acc.Add("buffered", vm.Buffers, vmtags)
-	acc.Add("used_percent", 100*float64(vm.Used)/float64(vm.Total), vmtags)
-	acc.Add("available_percent",
-		100*float64(vm.Available)/float64(vm.Total),
-		vmtags)
+	fields := map[string]interface{}{
+		"total":             vm.Total,
+		"available":         vm.Available,
+		"used":              vm.Used,
+		"free":              vm.Free,
+		"cached":            vm.Cached,
+		"buffered":          vm.Buffers,
+		"used_percent":      100 * float64(vm.Used) / float64(vm.Total),
+		"available_percent": 100 * float64(vm.Available) / float64(vm.Total),
+	}
+	acc.AddFields("mem", fields, nil)

 	return nil
 }

@@ -54,14 +53,15 @@ func (s *SwapStats) Gather(acc plugins.Accumulator) error {
 		return fmt.Errorf("error getting swap memory info: %s", err)
 	}

-	swaptags := map[string]string(nil)
-
-	acc.Add("total", swap.Total, swaptags)
-	acc.Add("used", swap.Used, swaptags)
-	acc.Add("free", swap.Free, swaptags)
-	acc.Add("used_percent", swap.UsedPercent, swaptags)
-	acc.Add("in", swap.Sin, swaptags)
-	acc.Add("out", swap.Sout, swaptags)
+	fields := map[string]interface{}{
+		"total":        swap.Total,
+		"used":         swap.Used,
+		"free":         swap.Free,
+		"used_percent": swap.UsedPercent,
+		"in":           swap.Sin,
+		"out":          swap.Sout,
+	}
+	acc.AddFields("swap", fields, nil)

 	return nil
 }
@@ -0,0 +1,73 @@
+package system
+
+import (
+	"testing"
+
+	"github.com/influxdb/telegraf/testutil"
+	"github.com/shirou/gopsutil/mem"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestMemStats(t *testing.T) {
+	var mps MockPS
+	var err error
+	defer mps.AssertExpectations(t)
+	var acc testutil.Accumulator
+
+	vms := &mem.VirtualMemoryStat{
+		Total:     12400,
+		Available: 7600,
+		Used:      5000,
+		Free:      1235,
+		// Active:      8134,
+		// Inactive:    1124,
+		// Buffers:     771,
+		// Cached:      4312,
+		// Wired:       134,
+		// Shared:      2142,
+	}
+
+	mps.On("VMStat").Return(vms, nil)
+
+	sms := &mem.SwapMemoryStat{
+		Total:       8123,
+		Used:        1232,
+		Free:        6412,
+		UsedPercent: 12.2,
+		Sin:         7,
+		Sout:        830,
+	}
+
+	mps.On("SwapStat").Return(sms, nil)
+
+	err = (&MemStats{&mps}).Gather(&acc)
+	require.NoError(t, err)
+
+	vmtags := map[string]string(nil)
+
+	assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags))
+	assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags))
+	assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags))
+	assert.True(t, acc.CheckTaggedValue("available_percent",
+		float64(7600)/float64(12400)*100,
+		vmtags))
+	assert.True(t, acc.CheckTaggedValue("used_percent",
+		float64(5000)/float64(12400)*100,
+		vmtags))
+	assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags))
+
+	acc.Points = nil
+
+	err = (&SwapStats{&mps}).Gather(&acc)
+	require.NoError(t, err)
+
+	swaptags := map[string]string(nil)
+
+	assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags))
+	assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags))
+	assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags))
+	assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags))
+	assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags))
+	assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags))
+}
@@ -70,14 +70,17 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error {
 			"interface": io.Name,
 		}

-		acc.Add("bytes_sent", io.BytesSent, tags)
-		acc.Add("bytes_recv", io.BytesRecv, tags)
-		acc.Add("packets_sent", io.PacketsSent, tags)
-		acc.Add("packets_recv", io.PacketsRecv, tags)
-		acc.Add("err_in", io.Errin, tags)
-		acc.Add("err_out", io.Errout, tags)
-		acc.Add("drop_in", io.Dropin, tags)
-		acc.Add("drop_out", io.Dropout, tags)
+		fields := map[string]interface{}{
+			"bytes_sent":   io.BytesSent,
+			"bytes_recv":   io.BytesRecv,
+			"packets_sent": io.PacketsSent,
+			"packets_recv": io.PacketsRecv,
+			"err_in":       io.Errin,
+			"err_out":      io.Errout,
+			"drop_in":      io.Dropin,
+			"drop_out":     io.Dropout,
+		}
+		acc.AddFields("net", fields, tags)
 	}

 	// Get system wide stats for different network protocols
@@ -0,0 +1,88 @@
+package system
+
+import (
+	"syscall"
+	"testing"
+
+	"github.com/influxdb/telegraf/testutil"
+	"github.com/shirou/gopsutil/net"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNetStats(t *testing.T) {
+	var mps MockPS
+	var err error
+	defer mps.AssertExpectations(t)
+	var acc testutil.Accumulator
+
+	netio := net.NetIOCountersStat{
+		Name:        "eth0",
+		BytesSent:   1123,
+		BytesRecv:   8734422,
+		PacketsSent: 781,
+		PacketsRecv: 23456,
+		Errin:       832,
+		Errout:      8,
+		Dropin:      7,
+		Dropout:     1,
+	}
+
+	mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil)
+
+	netprotos := []net.NetProtoCountersStat{
+		net.NetProtoCountersStat{
+			Protocol: "Udp",
+			Stats: map[string]int64{
+				"InDatagrams": 4655,
+				"NoPorts":     892592,
+			},
+		},
+	}
+	mps.On("NetProto").Return(netprotos, nil)
+
+	netstats := []net.NetConnectionStat{
+		net.NetConnectionStat{
+			Type: syscall.SOCK_DGRAM,
+		},
+		net.NetConnectionStat{
+			Status: "ESTABLISHED",
+		},
+		net.NetConnectionStat{
+			Status: "ESTABLISHED",
+		},
+		net.NetConnectionStat{
+			Status: "CLOSE",
+		},
+	}
+
+	mps.On("NetConnections").Return(netstats, nil)
+
+	err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
+	require.NoError(t, err)
+
+	ntags := map[string]string{
+		"interface": "eth0",
+	}
+
+	assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags))
+	assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags))
+	assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592)))
+	assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655)))
+
+	acc.Points = nil
+
+	err = (&NetStats{&mps}).Gather(&acc)
+	require.NoError(t, err)
+	netstattags := map[string]string(nil)
+
+	assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags))
+	assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags))
+	assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags))
+}
@@ -42,19 +42,23 @@ func (s *NetStats) Gather(acc plugins.Accumulator) error {
 		}
 		counts[netcon.Status] = c + 1
 	}
-	acc.Add("tcp_established", counts["ESTABLISHED"], tags)
-	acc.Add("tcp_syn_sent", counts["SYN_SENT"], tags)
-	acc.Add("tcp_syn_recv", counts["SYN_RECV"], tags)
-	acc.Add("tcp_fin_wait1", counts["FIN_WAIT1"], tags)
-	acc.Add("tcp_fin_wait2", counts["FIN_WAIT2"], tags)
-	acc.Add("tcp_time_wait", counts["TIME_WAIT"], tags)
-	acc.Add("tcp_close", counts["CLOSE"], tags)
-	acc.Add("tcp_close_wait", counts["CLOSE_WAIT"], tags)
-	acc.Add("tcp_last_ack", counts["LAST_ACK"], tags)
-	acc.Add("tcp_listen", counts["LISTEN"], tags)
-	acc.Add("tcp_closing", counts["CLOSING"], tags)
-	acc.Add("tcp_none", counts["NONE"], tags)
-	acc.Add("udp_socket", counts["UDP"], tags)
+
+	fields := map[string]interface{}{
+		"tcp_established": counts["ESTABLISHED"],
+		"tcp_syn_sent":    counts["SYN_SENT"],
+		"tcp_syn_recv":    counts["SYN_RECV"],
+		"tcp_fin_wait1":   counts["FIN_WAIT1"],
+		"tcp_fin_wait2":   counts["FIN_WAIT2"],
+		"tcp_time_wait":   counts["TIME_WAIT"],
+		"tcp_close":       counts["CLOSE"],
+		"tcp_close_wait":  counts["CLOSE_WAIT"],
+		"tcp_last_ack":    counts["LAST_ACK"],
+		"tcp_listen":      counts["LISTEN"],
+		"tcp_closing":     counts["CLOSING"],
+		"tcp_none":        counts["NONE"],
+		"udp_socket":      counts["UDP"],
+	}
+	acc.AddFields("netstat", fields, tags)

 	return nil
 }
@@ -1,12 +1,16 @@
 package system

 import (
+	"fmt"
 	gonet "net"
 	"os"
+	"reflect"
 	"strings"
+	"testing"

 	"github.com/influxdb/telegraf/internal"
 	"github.com/influxdb/telegraf/plugins"
+	"github.com/influxdb/telegraf/testutil"

 	dc "github.com/fsouza/go-dockerclient"
 	"github.com/shirou/gopsutil/cpu"

@@ -14,6 +18,8 @@ import (
 	"github.com/shirou/gopsutil/docker"
 	"github.com/shirou/gopsutil/mem"
 	"github.com/shirou/gopsutil/net"
+
+	"github.com/stretchr/testify/assert"
 )

 type DockerContainerStat struct {

@@ -166,3 +172,49 @@ func (s *systemPS) DockerStat() ([]*DockerContainerStat, error) {

 	return stats, nil
 }
+
+// Asserts that a given accumulator contains a measurement of type float64 with
+// specific tags within a certain distance of a given expected value. Asserts a failure
+// if the measurement is of the wrong type, or if no matching measurements are found
+//
+// Parameters:
+//     t *testing.T            : Testing object to use
+//     acc testutil.Accumulator: Accumulator to examine
+//     measurement string      : Name of the measurement to examine
+//     expectedValue float64   : Value to search for within the measurement
+//     delta float64           : Maximum acceptable distance of an accumulated value
+//                               from the expectedValue parameter. Useful when
+//                               floating-point arithmetic imprecision makes looking
+//                               for an exact match impractical
+//     tags map[string]string  : Tag set the found measurement must have. Set to nil to
+//                               ignore the tag set.
+func assertContainsTaggedFloat(
+	t *testing.T,
+	acc *testutil.Accumulator,
+	measurement string,
+	expectedValue float64,
+	delta float64,
+	tags map[string]string,
+) {
+	var actualValue float64
+	for _, pt := range acc.Points {
+		if pt.Measurement == measurement {
+			if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) {
+				if value, ok := pt.Fields["value"].(float64); ok {
+					actualValue = value
+					if (value >= expectedValue-delta) && (value <= expectedValue+delta) {
+						// Found the point, return without failing
+						return
+					}
+				} else {
+					assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64",
+						measurement))
+				}
+
+			}
+		}
+	}
+	msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f",
+		measurement, delta, expectedValue, actualValue)
+	assert.Fail(t, msg)
+}
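The tolerance check inside `assertContainsTaggedFloat` is a plain interval test, |actual − expected| ≤ delta, written without `math.Abs`. An equivalent predicate, exercised with the `usage_steal` numbers from the tests:

```go
package main

import "fmt"

// withinDelta is the comparison assertContainsTaggedFloat performs:
// true when value lies in [expected-delta, expected+delta].
func withinDelta(value, expected, delta float64) bool {
	return value >= expected-delta && value <= expected+delta
}

func main() {
	// usage_steal: expected 0.2301 with a 0.0005 tolerance, as in the tests.
	fmt.Println(withinDelta(0.23012, 0.2301, 0.0005)) // true
	fmt.Println(withinDelta(0.2812, 0.2301, 0.0005))  // false
}
```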
@@ -19,13 +19,6 @@ func (_ *SystemStats) Description() string {

 func (_ *SystemStats) SampleConfig() string { return "" }

-func (_ *SystemStats) add(acc plugins.Accumulator,
-	name string, val float64, tags map[string]string) {
-	if val >= 0 {
-		acc.Add(name, val, tags)
-	}
-}
-
 func (_ *SystemStats) Gather(acc plugins.Accumulator) error {
 	loadavg, err := load.LoadAvg()
 	if err != nil {

@@ -37,11 +30,14 @@ func (_ *SystemStats) Gather(acc plugins.Accumulator) error {
 		return err
 	}

-	acc.Add("load1", loadavg.Load1, nil)
-	acc.Add("load5", loadavg.Load5, nil)
-	acc.Add("load15", loadavg.Load15, nil)
-	acc.Add("uptime", float64(hostinfo.Uptime), nil)
-	acc.Add("uptime_format", format_uptime(hostinfo.Uptime), nil)
+	fields := map[string]interface{}{
+		"load1":         loadavg.Load1,
+		"load5":         loadavg.Load5,
+		"load15":        loadavg.Load15,
+		"uptime":        hostinfo.Uptime,
+		"uptime_format": format_uptime(hostinfo.Uptime),
+	}
+	acc.AddFields("system", fields, nil)

 	return nil
 }
@@ -1,426 +0,0 @@
-package system
-
-import (
-	"fmt"
-	"reflect"
-	"syscall"
-	"testing"
-
-	"github.com/influxdb/telegraf/testutil"
-	"github.com/shirou/gopsutil/cpu"
-	"github.com/shirou/gopsutil/disk"
-	"github.com/shirou/gopsutil/mem"
-	"github.com/shirou/gopsutil/net"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestSystemStats_GenerateStats(t *testing.T) {
-	var mps MockPS
-
-	defer mps.AssertExpectations(t)
-
-	var acc testutil.Accumulator
-
-	cts := cpu.CPUTimesStat{
-		CPU:       "cpu0",
-		User:      3.1,
-		System:    8.2,
-		Idle:      80.1,
-		Nice:      1.3,
-		Iowait:    0.2,
-		Irq:       0.1,
-		Softirq:   0.11,
-		Steal:     0.0511,
-		Guest:     8.1,
-		GuestNice: 0.324,
-	}
-
-	cts2 := cpu.CPUTimesStat{
-		CPU:       "cpu0",
-		User:      11.4,     // increased by 8.3
-		System:    10.9,     // increased by 2.7
-		Idle:      158.8699, // increased by 78.7699 (for total increase of 100)
-		Nice:      2.5,      // increased by 1.2
-		Iowait:    0.7,      // increased by 0.5
-		Irq:       1.2,      // increased by 1.1
-		Softirq:   0.31,     // increased by 0.2
-		Steal:     0.2812,   // increased by 0.0001
-		Guest:     12.9,     // increased by 4.8
-		GuestNice: 2.524,    // increased by 2.2
-	}
-
-	mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)
-
-	du := []*disk.DiskUsageStat{
-		{
-			Path:        "/",
-			Fstype:      "ext4",
-			Total:       128,
-			Free:        23,
-			InodesTotal: 1234,
-			InodesFree:  234,
-		},
-		{
-			Path:        "/home",
-			Fstype:      "ext4",
-			Total:       256,
-			Free:        46,
-			InodesTotal: 2468,
-			InodesFree:  468,
-		},
-	}
-
-	mps.On("DiskUsage").Return(du, nil)
-
-	diskio1 := disk.DiskIOCountersStat{
-
-		ReadCount:    888,
-		WriteCount:   5341,
-		ReadBytes:    100000,
-		WriteBytes:   200000,
-		ReadTime:     7123,
-		WriteTime:    9087,
-		Name:         "sda1",
-		IoTime:       123552,
-		SerialNumber: "ab-123-ad",
-	}
-	diskio2 := disk.DiskIOCountersStat{
-		ReadCount:    444,
-		WriteCount:   2341,
-		ReadBytes:    200000,
-		WriteBytes:   400000,
-		ReadTime:     3123,
-		WriteTime:    6087,
-		Name:         "sdb1",
-		IoTime:       246552,
-		SerialNumber: "bb-123-ad",
-	}
-
-	mps.On("DiskIO").Return(map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, nil)
-
-	netio := net.NetIOCountersStat{
-		Name:        "eth0",
-		BytesSent:   1123,
-		BytesRecv:   8734422,
-		PacketsSent: 781,
-		PacketsRecv: 23456,
-		Errin:       832,
-		Errout:      8,
-		Dropin:      7,
-		Dropout:     1,
-	}
-
-	mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil)
-
-	netprotos := []net.NetProtoCountersStat{
-		net.NetProtoCountersStat{
-			Protocol: "Udp",
-			Stats: map[string]int64{
-				"InDatagrams": 4655,
-				"NoPorts":     892592,
-			},
-		},
-	}
-	mps.On("NetProto").Return(netprotos, nil)
-
-	vms := &mem.VirtualMemoryStat{
-		Total:     12400,
-		Available: 7600,
-		Used:      5000,
-		Free:      1235,
-		// Active:      8134,
-		// Inactive:    1124,
-		// Buffers:     771,
-		// Cached:      4312,
-		// Wired:       134,
-		// Shared:      2142,
-	}
-
-	mps.On("VMStat").Return(vms, nil)
-
-	sms := &mem.SwapMemoryStat{
-		Total:       8123,
-		Used:        1232,
-		Free:        6412,
-		UsedPercent: 12.2,
-		Sin:         7,
-		Sout:        830,
-	}
-
-	mps.On("SwapStat").Return(sms, nil)
-
-	netstats := []net.NetConnectionStat{
-		net.NetConnectionStat{
-			Type: syscall.SOCK_DGRAM,
-		},
-		net.NetConnectionStat{
-			Status: "ESTABLISHED",
-		},
-		net.NetConnectionStat{
-			Status: "ESTABLISHED",
-		},
-		net.NetConnectionStat{
-			Status: "CLOSE",
-		},
-	}
-
-	mps.On("NetConnections").Return(netstats, nil)
-
-	cs := NewCPUStats(&mps)
-
-	cputags := map[string]string{
-		"cpu": "cpu0",
-	}
-
-	preCPUPoints := len(acc.Points)
-	err := cs.Gather(&acc)
-	require.NoError(t, err)
-	numCPUPoints := len(acc.Points) - preCPUPoints
-
-	expectedCPUPoints := 10
-	assert.Equal(t, expectedCPUPoints, numCPUPoints)
-
-	// Computed values are checked with delta > 0 becasue of floating point arithmatic
-	// imprecision
-	assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags)
-
-	mps2 := MockPS{}
-	mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
-	cs.ps = &mps2
-
-	// Should have added cpu percentages too
-	err = cs.Gather(&acc)
-	require.NoError(t, err)
-
-	numCPUPoints = len(acc.Points) - (preCPUPoints + numCPUPoints)
-	expectedCPUPoints = 20
-	assert.Equal(t, expectedCPUPoints, numCPUPoints)
-
-	assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags)
-	assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags)
-
-	assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags)
-	assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags)
-
-	preDiskPoints := len(acc.Points)
-
-	err = (&DiskStats{ps: &mps}).Gather(&acc)
-	require.NoError(t, err)
-
-	numDiskPoints := len(acc.Points) - preDiskPoints
-	expectedAllDiskPoints := 12
-	assert.Equal(t, expectedAllDiskPoints, numDiskPoints)
-
-	tags1 := map[string]string{
-		"path":   "/",
-		"fstype": "ext4",
-	}
-	tags2 := map[string]string{
-		"path":   "/home",
-		"fstype": "ext4",
-	}
-
-	assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1))
-	assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1))
-	assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1))
-	assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1))
-	assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1))
-	assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1))
-	assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2))
-	assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2))
-	assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2))
-	assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2))
-	assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2))
-	assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2))
-
-	// We expect 6 more DiskPoints to show up with an explicit match on "/"
-	// and /home not matching the /dev in Mountpoints
-	err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
-	assert.Equal(t, preDiskPoints+expectedAllDiskPoints+6, len(acc.Points))
-
-	// We should see all the diskpoints as Mountpoints includes both
-	// / and /home
-	err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
-	assert.Equal(t, preDiskPoints+2*expectedAllDiskPoints+6, len(acc.Points))
-
-	err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
-	require.NoError(t, err)
-
-	ntags := map[string]string{
-		"interface": "eth0",
-	}
-
-	assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags))
-	assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags))
-	assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592)))
-	assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655)))
-
-	preDiskIOPoints := len(acc.Points)
-
-	err = (&DiskIOStats{ps: &mps}).Gather(&acc)
-	require.NoError(t, err)
-
-	numDiskIOPoints := len(acc.Points) - preDiskIOPoints
-	expectedAllDiskIOPoints := 14
-	assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints)
-
-	dtags1 := map[string]string{
-		"name":   "sda1",
-		"serial": "ab-123-ad",
-	}
-	dtags2 := map[string]string{
-		"name":   "sdb1",
-		"serial": "bb-123-ad",
-	}
-
-	assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1))
-	assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1))
-	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1))
-	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1))
-	assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1))
-	assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1))
-	assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1))
-	assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2))
-	assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2))
-	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2))
-	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2))
-	assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2))
-	assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
-	assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))
-
-	// We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1"
-	// and serial should be missing from the tags with SkipSerialNumber set
-	err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
-	assert.Equal(t, preDiskIOPoints+expectedAllDiskIOPoints+7, len(acc.Points))
-
-	dtags3 := map[string]string{
-		"name": "sdb1",
-	}
-
-	assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3))
-	assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3))
-	assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3))
-	assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3))
-	assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3))
-	assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3))
-	assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3))
-
-	err = (&MemStats{&mps}).Gather(&acc)
-	require.NoError(t, err)
-
-	vmtags := map[string]string(nil)
-
-	assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags))
-	assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags))
-	assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags))
-	assert.True(t, acc.CheckTaggedValue("available_percent",
-		float64(7600)/float64(12400)*100,
-		vmtags))
-	assert.True(t, acc.CheckTaggedValue("used_percent",
-		float64(5000)/float64(12400)*100,
-		vmtags))
-	assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags))
-
-	acc.Points = nil
-
-	err = (&SwapStats{&mps}).Gather(&acc)
-	require.NoError(t, err)
-
-	swaptags := map[string]string(nil)
-
-	assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags))
-	assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags))
-	assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags))
-	assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags))
-	assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags))
-	assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags))
-
-	acc.Points = nil
-
-	err = (&NetStats{&mps}).Gather(&acc)
-	require.NoError(t, err)
-	netstattags := map[string]string(nil)
-
-	assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags))
-	assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags))
-	assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags))
-
-}
-
-// Asserts that a given accumulator contains a measurment of type float64 with
-// specific tags within a certain distance of a given expected value. Asserts a failure
-// if the measurement is of the wrong type, or if no matching measurements are found
-//
-// Paramaters:
-//     t *testing.T            : Testing object to use
-//     acc testutil.Accumulator: Accumulator to examine
-//     measurement string      : Name of the measurement to examine
-//     expectedValue float64   : Value to search for within the measurement
-//     delta float64           : Maximum acceptable distance of an accumulated value
-//                               from the expectedValue parameter. Useful when
-//                               floating-point arithmatic imprecision makes looking
-//                               for an exact match impractical
-//     tags map[string]string  : Tag set the found measurement must have. Set to nil to
-//                               ignore the tag set.
-func assertContainsTaggedFloat(
-	t *testing.T,
-	acc *testutil.Accumulator,
-	measurement string,
-	expectedValue float64,
-	delta float64,
-	tags map[string]string,
-) {
-	var actualValue float64
-	for _, pt := range acc.Points {
-		if pt.Measurement == measurement {
-			if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) {
-				if value, ok := pt.Fields["value"].(float64); ok {
-					actualValue = value
-					if (value >= expectedValue-delta) && (value <= expectedValue+delta) {
-						// Found the point, return without failing
-						return
-					}
-				} else {
-					assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64",
-						measurement))
-				}
-
-			}
-		}
-	}
-	msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f",
-		measurement, delta, expectedValue, actualValue)
-	assert.Fail(t, msg)
-}
@@ -41,6 +41,5 @@ func (s *Trig) Gather(acc plugins.Accumulator) error {
 }

 func init() {
-
 	plugins.Add("Trig", func() plugins.Plugin { return &Trig{x: 0.0} })
 }
@ -5,28 +5,21 @@ import (
|
|||
"errors"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/influxdb/telegraf/plugins"
|
||||
)
|
||||
|
||||
type Twemproxy struct {
|
||||
Instances []TwemproxyInstance
|
||||
}
|
||||
|
||||
type TwemproxyInstance struct {
|
||||
Addr string
|
||||
Pools []string
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
[[plugins.twemproxy.instances]]
|
||||
# Twemproxy stats address and port (no scheme)
|
||||
addr = "localhost:22222"
|
||||
# Monitor pool name
|
||||
pools = ["redis_pool", "mc_pool"]
|
||||
# Twemproxy stats address and port (no scheme)
|
||||
addr = "localhost:22222"
|
||||
# Monitor pool name
|
||||
pools = ["redis_pool", "mc_pool"]
|
||||
`
|
||||
|
||||
func (t *Twemproxy) SampleConfig() string {
|
||||
|
@ -39,35 +32,7 @@ func (t *Twemproxy) Description() string {
|
|||
|
||||
// Gather data from all Twemproxy instances
|
||||
func (t *Twemproxy) Gather(acc plugins.Accumulator) error {
|
||||
var wg sync.WaitGroup
|
||||
errorChan := make(chan error, len(t.Instances))
|
||||
for _, inst := range t.Instances {
|
||||
wg.Add(1)
|
||||
go func(inst TwemproxyInstance) {
|
||||
defer wg.Done()
|
||||
if err := inst.Gather(acc); err != nil {
|
||||
errorChan <- err
|
||||
}
|
||||
}(inst)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errorChan)
|
||||
errs := []string{}
|
||||
for err := range errorChan {
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errors.New(strings.Join(errs, "\n"))
|
||||
}
|
||||
|
||||
// Gather data from one Twemproxy
|
||||
func (ti *TwemproxyInstance) Gather(
|
||||
acc plugins.Accumulator,
|
||||
) error {
|
||||
conn, err := net.DialTimeout("tcp", ti.Addr, 1*time.Second)
|
||||
conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -82,14 +47,14 @@ func (ti *TwemproxyInstance) Gather(
 	}

 	tags := make(map[string]string)
-	tags["twemproxy"] = ti.Addr
-	ti.processStat(acc, tags, stats)
+	tags["twemproxy"] = t.Addr
+	t.processStat(acc, tags, stats)

 	return nil
 }

 // Process Twemproxy server stats
-func (ti *TwemproxyInstance) processStat(
+func (t *Twemproxy) processStat(
 	acc plugins.Accumulator,
 	tags map[string]string,
 	data map[string]interface{},

@@ -100,40 +65,42 @@ func (ti *TwemproxyInstance) processStat(
 		}
 	}

+	fields := make(map[string]interface{})
 	metrics := []string{"total_connections", "curr_connections", "timestamp"}
 	for _, m := range metrics {
 		if value, ok := data[m]; ok {
 			if val, ok := value.(float64); ok {
-				acc.Add(m, val, tags)
+				fields[m] = val
 			}
 		}
 	}
+	acc.AddFields("twemproxy", fields, tags)

-	for _, pool := range ti.Pools {
+	for _, pool := range t.Pools {
 		if poolStat, ok := data[pool]; ok {
 			if data, ok := poolStat.(map[string]interface{}); ok {
 				poolTags := copyTags(tags)
 				poolTags["pool"] = pool
-				ti.processPool(acc, poolTags, pool+"_", data)
+				t.processPool(acc, poolTags, data)
 			}
 		}
 	}
 }

 // Process pool data in Twemproxy stats
-func (ti *TwemproxyInstance) processPool(
+func (t *Twemproxy) processPool(
 	acc plugins.Accumulator,
 	tags map[string]string,
-	prefix string,
 	data map[string]interface{},
 ) {
 	serverTags := make(map[string]map[string]string)

+	fields := make(map[string]interface{})
 	for key, value := range data {
 		switch key {
 		case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof":
 			if val, ok := value.(float64); ok {
-				acc.Add(prefix+key, val, tags)
+				fields[key] = val
 			}
 		default:
 			if data, ok := value.(map[string]interface{}); ok {

@@ -141,27 +108,29 @@ func (ti *TwemproxyInstance) processPool(
 				serverTags[key] = copyTags(tags)
 				serverTags[key]["server"] = key
 			}
-			ti.processServer(acc, serverTags[key], prefix, data)
+			t.processServer(acc, serverTags[key], data)
 		}
 	}
+	acc.AddFields("twemproxy_pool", fields, tags)
 }

 // Process backend server(redis/memcached) stats
-func (ti *TwemproxyInstance) processServer(
+func (t *Twemproxy) processServer(
 	acc plugins.Accumulator,
 	tags map[string]string,
-	prefix string,
 	data map[string]interface{},
 ) {
+	fields := make(map[string]interface{})
 	for key, value := range data {
 		switch key {
 		default:
 			if val, ok := value.(float64); ok {
-				acc.Add(prefix+key, val, tags)
+				fields[key] = val
 			}
 		}
 	}
+	acc.AddFields("twemproxy_pool", fields, tags)
 }

 // Tags is not expected to be mutated after passing to Add.

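The pattern repeated throughout this diff is to collect per-stat values into a `fields` map and emit one aggregated point with `acc.AddFields`, rather than one point per `acc.Add` call. Below is an illustrative reduction of that change, not code from the commit; the stat values are made up:

```go
package example

import "github.com/influxdb/telegraf/plugins"

// aggregate shows the v0.3.0 accumulator change in miniature: stats that
// used to become separate measurements are now fields of one measurement.
func aggregate(acc plugins.Accumulator, tags map[string]string) {
	// Old style: one point (and one measurement name) per stat.
	//   acc.Add("total_connections", 42.0, tags)
	//   acc.Add("curr_connections", 7.0, tags)

	// New style: a single "twemproxy" point carrying both stats as fields.
	fields := map[string]interface{}{
		"total_connections": 42.0,
		"curr_connections":  7.0,
	}
	acc.AddFields("twemproxy", fields, tags)
}
```

In line-protocol terms, the old style produced separate points such as `total_connections value=42` and `curr_connections value=7`, while the new style produces one `twemproxy total_connections=42,curr_connections=7` point.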
@@ -88,15 +88,15 @@ func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error {
 	}

 	tag := map[string]string{"pool": pool.name}
-
+	fields := make(map[string]interface{})
 	for i := 0; i < keyCount; i++ {
 		value, err := strconv.ParseInt(values[i], 10, 64)
 		if err != nil {
 			return err
 		}
-
-		acc.Add(keys[i], value, tag)
+		fields[keys[i]] = value
 	}
+	acc.AddFields("zfs_pool", fields, tag)

 	return nil
 }

@@ -124,6 +124,7 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error {
 		}
 	}

+	fields := make(map[string]interface{})
 	for _, metric := range kstatMetrics {
 		lines, err := internal.ReadLines(kstatPath + "/" + metric)
 		if err != nil {

@@ -140,9 +141,10 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error {
 			key := metric + "_" + rawData[0]
 			rawValue := rawData[len(rawData)-1]
 			value, _ := strconv.ParseInt(rawValue, 10, 64)
-			acc.Add(key, value, tags)
+			fields[key] = value
 		}
 	}
+	acc.AddFields("zfs", fields, tags)
 	return nil
 }

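As a standalone illustration of what this loop does (assuming whitespace-separated kstat lines; the metric name and numbers are invented), the field key is built from the metric name plus the line's first column, and the value is parsed from its last column:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Invented lines in the kstat "name type data" layout; the plugin
	// reads the real ones from files under kstatPath.
	metric := "arcstats"
	lines := []string{
		"hits 4 12345",
		"misses 4 67",
	}

	fields := make(map[string]interface{})
	for _, line := range lines {
		rawData := strings.Fields(line)
		key := metric + "_" + rawData[0]
		rawValue := rawData[len(rawData)-1]
		value, _ := strconv.ParseInt(rawValue, 10, 64)
		fields[key] = value
	}
	fmt.Println(fields) // contains arcstats_hits=12345 and arcstats_misses=67
}
```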
@@ -67,35 +67,37 @@ func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error
 	defer c.Close()

 	fmt.Fprintf(c, "%s\n", "mntr")

 	rdr := bufio.NewReader(c)

 	scanner := bufio.NewScanner(rdr)

+	service := strings.Split(address, ":")
+	if len(service) != 2 {
+		return fmt.Errorf("Invalid service address: %s", address)
+	}
+	tags := map[string]string{"server": service[0], "port": service[1]}
+	fields := make(map[string]interface{})
 	for scanner.Scan() {
 		line := scanner.Text()

 		re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
 		parts := re.FindStringSubmatch(string(line))

-		service := strings.Split(address, ":")
-
-		if len(parts) != 3 || len(service) != 2 {
+		if len(parts) != 3 {
 			return fmt.Errorf("unexpected line in mntr response: %q", line)
 		}
-
-		tags := map[string]string{"server": service[0], "port": service[1]}
-
 		measurement := strings.TrimPrefix(parts[1], "zk_")
 		sValue := string(parts[2])

 		iVal, err := strconv.ParseInt(sValue, 10, 64)
 		if err == nil {
-			acc.Add(measurement, iVal, tags)
+			fields[measurement] = iVal
 		} else {
-			acc.Add(measurement, sValue, tags)
+			fields[measurement] = sValue
 		}
 	}
+	acc.AddFields("zookeeper", fields, tags)

 	return nil
 }

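A self-contained sketch of the mntr parsing above, using one invented response line: numeric stats become int64 fields, while non-numeric values (such as `zk_version`) are kept as strings.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

func main() {
	// One invented line from a `mntr` response; real output is a series
	// of "zk_<name>\t<value>" pairs.
	line := "zk_avg_latency\t0"

	re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
	parts := re.FindStringSubmatch(line)
	if len(parts) != 3 {
		panic("unexpected line in mntr response")
	}

	fields := make(map[string]interface{})
	// parts[1] already lacks the zk_ prefix because the capture group
	// starts after it; the TrimPrefix mirrors the plugin code.
	measurement := strings.TrimPrefix(parts[1], "zk_")
	if iVal, err := strconv.ParseInt(parts[2], 10, 64); err == nil {
		fields[measurement] = iVal // e.g. zk_avg_latency -> int64 field
	} else {
		fields[measurement] = parts[2] // e.g. zk_version -> string field
	}
	fmt.Println(fields) // map[avg_latency:0]
}
```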