Merge ab00925fc4 into ee8d99b955 (commit ec66db5631)
CHANGELOG.md (22 lines changed)
@@ -1,3 +1,25 @@
## v0.3.0 [unreleased]

### Release Notes
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** Plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- `twemproxy` plugin: `prefix` option removed.
- `procstat` cpu measurements are now prepended with `cpu_time_` instead of
only `cpu_`
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).

### Features
- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix.
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin name. (`name_override`)

### Bugfixes

## v0.2.5 [unreleased]

### Features
CONFIGURATION.md (new file, 177 lines)

@@ -0,0 +1,177 @@
# Telegraf Configuration

## Plugin Configuration

There are some configuration options that are configurable per plugin:

* **name_override**: Override the base name of the measurement.
(Default is the name of the plugin).
* **name_prefix**: Specifies a prefix to attach to the measurement name.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific plugin's measurements.

### Plugin Filters

There are also filters that can be configured per plugin:

* **pass**: An array of strings that is used to filter metrics generated by the
current plugin. Each string in the array is tested as a glob match against field names,
and if it matches, the field is emitted.
* **drop**: The inverse of pass; if a field name matches, the field is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current plugin. Each string in the array is tested as a glob
match against the value of the named tag, and if it matches, the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not emitted.
This is tested on measurements that have passed the tagpass test.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular plugin should be run less or more often,
you can configure that here.

### Plugin Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
fields which begin with `time_`.

```toml
[tags]
dc = "denver-1"

[agent]
interval = "10s"

# OUTPUTS
[outputs]
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"

# PLUGINS
[plugins]
[[plugins.cpu]]
percpu = true
totalcpu = false
# filter all fields beginning with 'time_'
drop = ["time_*"]
```

### Plugin Config: tagpass and tagdrop

```toml
[plugins]
[[plugins.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[plugins.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]

[[plugins.disk]]
[plugins.disk.tagpass]
# tagpass conditions are OR, not AND.
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
# then the metric passes
fstype = [ "ext4", "xfs" ]
# Globs can also be used on the tag values
path = [ "/opt", "/home*" ]
```

### Plugin Config: pass and drop

```toml
# Drop all metrics for guest & steal CPU usage
[[plugins.cpu]]
percpu = false
totalcpu = true
drop = ["usage_guest", "usage_steal"]

# Only store inode related metrics for disks
[[plugins.disk]]
pass = ["inodes*"]
```

### Plugin config: prefix, suffix, and override

This plugin will emit measurements with the name `cpu_total`.

```toml
[[plugins.cpu]]
name_suffix = "_total"
percpu = false
totalcpu = true
```

This will emit measurements with the name `foobar`.

```toml
[[plugins.cpu]]
name_override = "foobar"
percpu = false
totalcpu = true
```

### Plugin config: tags

This plugin will emit measurements with two additional tags: `tag1=foo` and
`tag2=bar`

```toml
[[plugins.cpu]]
percpu = false
totalcpu = true
[plugins.cpu.tags]
tag1 = "foo"
tag2 = "bar"
```

### Multiple plugins of the same type

Additional plugins (or outputs) of the same type can be specified;
just define more instances in the config file:

```toml
[[plugins.cpu]]
percpu = false
totalcpu = true

[[plugins.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time*"]
```

## Output Configuration

Telegraf also supports specifying multiple output sinks to send data to.
Configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as plugins
(pass, drop, tagpass, tagdrop), added in 0.2.4.

```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
drop = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
pass = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```
README.md (133 lines changed)
@@ -116,99 +116,10 @@ unit parser, e.g. "10s" for 10 seconds or "5m" for 5 minutes.
* **debug**: Set to true to gather and send metrics to STDOUT as well as
InfluxDB.

## Plugin Options
## Configuration

There are 5 configuration options that are configurable per plugin:

* **pass**: An array of strings that is used to filter metrics generated by the
current plugin. Each string in the array is tested as a glob match against metric names
and if it matches, the metric is emitted.
* **drop**: The inverse of pass, if a metric name matches, it is not emitted.
* **tagpass**: tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as a glob match against
the tag name, and if it matches the metric is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted.
This is tested on metrics that have passed the tagpass test.
* **interval**: How often to gather this metric. Normal plugins use a single
global interval, but if one particular plugin should be run less or more often,
you can configure that here.

### Plugin Configuration Examples

This is a full working config that will output CPU data to an InfluxDB instance
at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output
measurements at a 10s interval and will collect per-cpu data, dropping any
measurements which begin with `cpu_time`.

```toml
[tags]
dc = "denver-1"

[agent]
interval = "10s"

# OUTPUTS
[outputs]
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"

# PLUGINS
[plugins]
[[plugins.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time*"]
```

Below is how to configure `tagpass` and `tagdrop` parameters

```toml
[plugins]
[[plugins.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
# Don't collect CPU data for cpu6 & cpu7
[plugins.cpu.tagdrop]
cpu = [ "cpu6", "cpu7" ]

[[plugins.disk]]
[plugins.disk.tagpass]
# tagpass conditions are OR, not AND.
# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
# then the metric passes
fstype = [ "ext4", "xfs" ]
# Globs can also be used on the tag values
path = [ "/opt", "/home*" ]
```

Below is how to configure `pass` and `drop` parameters

```toml
# Drop all metrics for guest CPU usage
[[plugins.cpu]]
drop = [ "cpu_usage_guest" ]

# Only store inode related metrics for disks
[[plugins.disk]]
pass = [ "disk_inodes*" ]
```


Additional plugins (or outputs) of the same type can be specified,
just define more instances in the config file:

```toml
[[plugins.cpu]]
percpu = false
totalcpu = true

[[plugins.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time*"]
```
See the [configuration guide](CONFIGURATION.md) for a rundown of the more advanced
configuration options.

## Supported Plugins

@@ -226,7 +137,7 @@ Telegraf currently has support for collecting metrics from:
* haproxy
* httpjson (generic JSON-emitting http service plugin)
* influxdb
* jolokia (remote JMX with JSON over HTTP)
* jolokia
* leofs
* lustre2
* mailchimp
@@ -249,10 +160,10 @@
* system
    * cpu
    * mem
    * io
    * net
    * netstat
    * disk
    * diskio
    * swap

## Supported Service Plugins
@@ -265,40 +176,6 @@ Telegraf can collect metrics via the following services:
We'll be adding support for many more over the coming months. Read on if you
want to add support for another service or third-party API.

## Output options

Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
found by running `telegraf -sample-config`.

Outputs also support the same configurable options as plugins
(pass, drop, tagpass, tagdrop), added in 0.2.4

```toml
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
drop = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
pass = ["aerospike*"]

[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
```


## Supported Outputs

* influxdb

accumulator.go

@@ -69,30 +69,72 @@ func (ac *accumulator) AddFields(
    tags map[string]string,
    t ...time.Time,
) {
    // Validate uint64 and float64 fields
    if !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
        return
    }

    // Override measurement name if set
    if len(ac.pluginConfig.NameOverride) != 0 {
        measurement = ac.pluginConfig.NameOverride
    }
    // Apply measurement prefix and suffix if set
    if len(ac.pluginConfig.MeasurementPrefix) != 0 {
        measurement = ac.pluginConfig.MeasurementPrefix + measurement
    }
    if len(ac.pluginConfig.MeasurementSuffix) != 0 {
        measurement = measurement + ac.pluginConfig.MeasurementSuffix
    }

    if tags == nil {
        tags = make(map[string]string)
    }
    // Apply plugin-wide tags if set
    for k, v := range ac.pluginConfig.Tags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }
    // Apply daemon-wide tags if set
    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    result := make(map[string]interface{})
    for k, v := range fields {
        // Filter out any filtered fields
        if ac.pluginConfig != nil {
            if !ac.pluginConfig.Filter.ShouldPass(k) {
                continue
            }
        }
        result[k] = v

        // Validate uint64 and float64 fields
        switch val := v.(type) {
        case uint64:
            // InfluxDB does not support writing uint64
            if val < uint64(9223372036854775808) {
                fields[k] = int64(val)
                result[k] = int64(val)
            } else {
                fields[k] = int64(9223372036854775807)
                result[k] = int64(9223372036854775807)
            }
        case float64:
            // NaNs are invalid values in influxdb, skip measurement
            if math.IsNaN(val) || math.IsInf(val, 0) {
                if ac.debug {
                    log.Printf("Measurement [%s] has a NaN or Inf field, skipping",
                        measurement)
                    log.Printf("Measurement [%s] field [%s] has a NaN or Inf "+
                        "field, skipping",
                        measurement, k)
                }
                return
                continue
            }
        }
    }

    if tags == nil {
        tags = make(map[string]string)
    fields = nil
    if len(result) == 0 {
        return
    }

    var timestamp time.Time
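The uint64 clamp above exists because InfluxDB had no unsigned integer type at this version, and NaN/Inf floats cannot be written at all. A minimal, self-contained sketch of the same validation logic (the function name `sanitizeFields` is illustrative, not telegraf's API):

```go
package main

import (
	"fmt"
	"math"
)

// sanitizeFields mirrors the validation in AddFields above: uint64 values
// are clamped into the int64 range (InfluxDB cannot store uint64), and
// NaN/Inf float64 values are dropped rather than written.
func sanitizeFields(fields map[string]interface{}) map[string]interface{} {
	result := make(map[string]interface{})
	for k, v := range fields {
		switch val := v.(type) {
		case uint64:
			if val <= uint64(math.MaxInt64) {
				result[k] = int64(val)
			} else {
				result[k] = int64(math.MaxInt64)
			}
		case float64:
			if math.IsNaN(val) || math.IsInf(val, 0) {
				continue // skip the invalid field, keep the rest
			}
			result[k] = val
		default:
			result[k] = v
		}
	}
	return result
}

func main() {
	fields := map[string]interface{}{
		"ok":  float64(1.5),
		"big": uint64(math.MaxUint64),
		"bad": math.NaN(),
	}
	fmt.Println(sanitizeFields(fields)) // map[big:9223372036854775807 ok:1.5]
}
```

Note that the new code switched the NaN case from `return` to `continue`, so a single bad field no longer discards the whole point.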
@@ -106,19 +148,7 @@ func (ac *accumulator) AddFields(
        measurement = ac.prefix + measurement
    }

    if ac.pluginConfig != nil {
        if !ac.pluginConfig.Filter.ShouldPass(measurement) || !ac.pluginConfig.Filter.ShouldTagsPass(tags) {
            return
        }
    }

    for k, v := range ac.defaultTags {
        if _, ok := tags[k]; !ok {
            tags[k] = v
        }
    }

    pt, err := client.NewPoint(measurement, tags, fields, timestamp)
    pt, err := client.NewPoint(measurement, tags, result, timestamp)
    if err != nil {
        log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
        return
agent.go (6 lines changed)
@@ -104,7 +104,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {

    acc := NewAccumulator(plugin.Config, pointChan)
    acc.SetDebug(a.Config.Agent.Debug)
    acc.SetPrefix(plugin.Name + "_")
    // acc.SetPrefix(plugin.Name + "_")
    acc.SetDefaultTags(a.Config.Tags)

    if err := plugin.Plugin.Gather(acc); err != nil {

@@ -141,7 +141,7 @@ func (a *Agent) gatherSeparate(

    acc := NewAccumulator(plugin.Config, pointChan)
    acc.SetDebug(a.Config.Agent.Debug)
    acc.SetPrefix(plugin.Name + "_")
    // acc.SetPrefix(plugin.Name + "_")
    acc.SetDefaultTags(a.Config.Tags)

    if err := plugin.Plugin.Gather(acc); err != nil {

@@ -187,7 +187,7 @@ func (a *Agent) Test() error {
    for _, plugin := range a.Config.Plugins {
        acc := NewAccumulator(plugin.Config, pointChan)
        acc.SetDebug(true)
        acc.SetPrefix(plugin.Name + "_")
        // acc.SetPrefix(plugin.Name + "_")

        fmt.Printf("* Plugin: %s, Collection 1\n", plugin.Name)
        if plugin.Config.Interval != 0 {
@@ -97,8 +97,8 @@
# Mountpoints=["/"]

# Read metrics about disk IO by device
[[plugins.io]]
# By default, telegraf will gather stats for all devices including
[[plugins.diskio]]
# By default, telegraf will gather stats for all devices including
# disk partitions.
# Setting devices will restrict the stats to the specified devices.
# Devices=["sda","sdb"]
config.go

@@ -112,9 +112,13 @@ type Filter struct {

// PluginConfig containing a name, interval, and filter
type PluginConfig struct {
    Name     string
    Filter   Filter
    Interval time.Duration
    Name              string
    NameOverride      string
    MeasurementPrefix string
    MeasurementSuffix string
    Tags              map[string]string
    Filter            Filter
    Interval          time.Duration
}

// OutputConfig containing name and filter

@@ -142,12 +146,12 @@ func (ro *RunningOutput) FilterPoints(points []*client.Point) []*client.Point {

// ShouldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f Filter) ShouldPass(measurement string) bool {
func (f Filter) ShouldPass(fieldkey string) bool {
    if f.Pass != nil {
        for _, pat := range f.Pass {
            // TODO remove HasPrefix check, leaving it for now for legacy support.
            // Cam, 2015-12-07
            if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
            if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
                return true
            }
        }

@@ -158,7 +162,7 @@ func (f Filter) ShouldPass(measurement string) bool {
        for _, pat := range f.Drop {
            // TODO remove HasPrefix check, leaving it for now for legacy support.
            // Cam, 2015-12-07
            if strings.HasPrefix(measurement, pat) || internal.Glob(pat, measurement) {
            if strings.HasPrefix(fieldkey, pat) || internal.Glob(pat, fieldkey) {
                return false
            }
        }
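The rename from `measurement` to `fieldkey` reflects the v0.3.0 schema: pass/drop now filter field keys rather than measurement names. A self-contained sketch of the same pass/drop logic, using the standard library's `path.Match` as a stand-in for telegraf's `internal.Glob` (an assumption; the real matcher may differ in detail):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// Filter mimics the pass/drop semantics shown above: pass is checked
// first (allow on match), then drop (deny on match); unmatched keys pass.
type Filter struct {
	Pass []string
	Drop []string
}

func (f Filter) ShouldPass(fieldkey string) bool {
	for _, pat := range f.Pass {
		// HasPrefix kept for parity with the legacy check in the diff.
		if strings.HasPrefix(fieldkey, pat) || glob(pat, fieldkey) {
			return true
		}
	}
	if len(f.Pass) > 0 {
		return false // a pass list was given and nothing matched
	}
	for _, pat := range f.Drop {
		if strings.HasPrefix(fieldkey, pat) || glob(pat, fieldkey) {
			return false
		}
	}
	return true
}

// glob is a stand-in for internal.Glob based on path.Match.
func glob(pattern, s string) bool {
	ok, _ := path.Match(pattern, s)
	return ok
}

func main() {
	f := Filter{Drop: []string{"time_*"}}
	fmt.Println(f.ShouldPass("time_user"))  // false
	fmt.Println(f.ShouldPass("usage_idle")) // true
}
```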
@@ -527,6 +531,11 @@ func (c *Config) addPlugin(name string, table *ast.Table) error {
    if len(c.PluginFilters) > 0 && !sliceContains(name, c.PluginFilters) {
        return nil
    }
    // Legacy support renaming io plugin to diskio
    if name == "io" {
        name = "diskio"
    }

    creator, ok := plugins.Plugins[name]
    if !ok {
        return fmt.Errorf("Undefined but requested plugin: %s", name)

@@ -628,7 +637,8 @@ func buildFilter(tbl *ast.Table) Filter {
    return f
}

// buildPlugin parses plugin specific items from the ast.Table, builds the filter and returns a
// buildPlugin parses plugin specific items from the ast.Table,
// builds the filter and returns a
// PluginConfig to be inserted into RunningPlugin
func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
    cp := &PluginConfig{Name: name}

@@ -644,10 +654,47 @@ func buildPlugin(name string, tbl *ast.Table) (*PluginConfig, error) {
            }
        }
    }

    if node, ok := tbl.Fields["name_prefix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementPrefix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_suffix"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.MeasurementSuffix = str.Value
            }
        }
    }

    if node, ok := tbl.Fields["name_override"]; ok {
        if kv, ok := node.(*ast.KeyValue); ok {
            if str, ok := kv.Value.(*ast.String); ok {
                cp.NameOverride = str.Value
            }
        }
    }

    cp.Tags = make(map[string]string)
    if node, ok := tbl.Fields["tags"]; ok {
        if subtbl, ok := node.(*ast.Table); ok {
            if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
                log.Printf("Could not parse tags for plugin %s\n", name)
            }
        }
    }

    delete(tbl.Fields, "name_prefix")
    delete(tbl.Fields, "name_suffix")
    delete(tbl.Fields, "name_override")
    delete(tbl.Fields, "interval")
    delete(tbl.Fields, "tags")
    cp.Filter = buildFilter(tbl)
    return cp, nil

}

// buildOutput parses output specific items from the ast.Table, builds the filter and returns an

@@ -659,5 +706,4 @@ func buildOutput(name string, tbl *ast.Table) (*OutputConfig, error) {
        Filter: buildFilter(tbl),
    }
    return oc, nil

}
@@ -105,7 +105,7 @@ urls = ["http://localhost/server-status?auto"]
drop = ["cpu_time"]

# Read metrics about disk usage by mount point
[[plugins.disk]]
[[plugins.diskio]]
# no configuration

# Read metrics from one or many disque servers
internal.go

@@ -3,6 +3,7 @@ package internal
import (
    "bufio"
    "errors"
    "fmt"
    "os"
    "strings"
    "time"

@@ -27,6 +28,39 @@ func (d *Duration) UnmarshalTOML(b []byte) error {

var NotImplementedError = errors.New("not implemented yet")

type JSONFlattener struct {
    Fields map[string]interface{}
}

// FlattenJSON flattens nested maps/interfaces into a fields map
func (f *JSONFlattener) FlattenJSON(
    fieldname string,
    v interface{},
) error {
    if f.Fields == nil {
        f.Fields = make(map[string]interface{})
    }
    fieldname = strings.Trim(fieldname, "_")
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            err := f.FlattenJSON(fieldname+"_"+k+"_", v)
            if err != nil {
                return err
            }
        }
    case float64:
        f.Fields[fieldname] = t
    case bool, string, []interface{}:
        // ignored types
        return nil
    default:
        return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",
            t, t, fieldname)
    }
    return nil
}

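A short usage sketch of the new flattener, assuming the import path shown in this diff. Numeric leaves become fields keyed by their underscore-joined path; booleans, strings, and arrays are silently ignored, per the switch above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/influxdb/telegraf/internal"
)

func main() {
	raw := []byte(`{"jvm": {"heap": {"used": 1024.0}}, "up": true}`)
	var parsed map[string]interface{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		panic(err)
	}
	f := internal.JSONFlattener{}
	if err := f.FlattenJSON("", parsed); err != nil {
		panic(err)
	}
	// Prints map[jvm_heap_used:1024]; "up" (a bool) was dropped.
	fmt.Println(f.Fields)
}
```

This is what lets the elasticsearch plugin (later in this diff) turn whole nested stats documents into field maps for `acc.AddFields` instead of one point per leaf.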
// ReadLines reads contents from a file and splits them by new lines.
// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1).
func ReadLines(filename string) ([]string, error) {
amon.go

@@ -58,21 +58,26 @@ func (a *Amon) Write(points []*client.Point) error {
        return nil
    }
    ts := TimeSeries{}
    var tempSeries = make([]*Metric, len(points))
    var acceptablePoints = 0
    tempSeries := []*Metric{}
    metricCounter := 0

    for _, pt := range points {
        metric := &Metric{
            Metric: strings.Replace(pt.Name(), "_", ".", -1),
        }
        if p, err := buildPoint(pt); err == nil {
            metric.Points[0] = p
            tempSeries[acceptablePoints] = metric
            acceptablePoints += 1
        mname := strings.Replace(pt.Name(), "_", ".", -1)
        if amonPts, err := buildPoints(pt); err == nil {
            for fieldName, amonPt := range amonPts {
                metric := &Metric{
                    Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
                }
                metric.Points[0] = amonPt
                tempSeries = append(tempSeries, metric)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
        }
    }
    ts.Series = make([]*Metric, acceptablePoints)

    ts.Series = make([]*Metric, metricCounter)
    copy(ts.Series, tempSeries[0:])
    tsBytes, err := json.Marshal(ts)
    if err != nil {

@@ -110,13 +115,17 @@ func (a *Amon) authenticatedUrl() string {
    return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
}

func buildPoint(pt *client.Point) (Point, error) {
    var p Point
    if err := p.setValue(pt.Fields()["value"]); err != nil {
        return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
func buildPoints(pt *client.Point) (map[string]Point, error) {
    pts := make(map[string]Point)
    for k, v := range pt.Fields() {
        var p Point
        if err := p.setValue(v); err != nil {
            return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
        }
        p[0] = float64(pt.Time().Unix())
        pts[k] = p
    }
    p[0] = float64(pt.Time().Unix())
    return p, nil
    return pts, nil
}

func (p *Point) setValue(v interface{}) error {
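Amon, Datadog, Librato, OpenTSDB, and Riemann all get the same treatment in this change: with fields aggregated into one point, each output now fans a point out into one series per field. A generic sketch of that pattern (the `Series` type here is a simplified stand-in for each output's own Metric/Gauge/Event types):

```go
package main

import "fmt"

// Series is a stand-in for an output-specific type (Amon/Datadog Metric,
// Librato Gauge, OpenTSDB MetricLine, Riemann Event).
type Series struct {
	Name  string
	Value interface{}
}

// fanOut expands one aggregated point (name + field map) into one
// output series per field, naming each series "<point>_<field>".
func fanOut(name string, fields map[string]interface{}) []Series {
	out := []Series{}
	for fieldName, value := range fields {
		out = append(out, Series{
			Name:  name + "_" + fieldName,
			Value: value,
		})
	}
	return out
}

func main() {
	fields := map[string]interface{}{"usage_idle": 98.2, "usage_user": 1.1}
	for _, s := range fanOut("cpu", fields) {
		fmt.Printf("%s = %v\n", s.Name, s.Value)
	}
}
```

Note that append-based collection replaces the old fixed-size `make([]*Metric, len(points))` slices, since one point can now yield many series.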

datadog.go

@@ -67,23 +67,26 @@ func (d *Datadog) Write(points []*client.Point) error {
        return nil
    }
    ts := TimeSeries{}
    var tempSeries = make([]*Metric, len(points))
    var acceptablePoints = 0
    tempSeries := []*Metric{}
    metricCounter := 0

    for _, pt := range points {
        metric := &Metric{
            Metric: strings.Replace(pt.Name(), "_", ".", -1),
            Tags:   buildTags(pt.Tags()),
            Host:   pt.Tags()["host"],
        }
        if p, err := buildPoint(pt); err == nil {
            metric.Points[0] = p
            tempSeries[acceptablePoints] = metric
            acceptablePoints += 1
        mname := strings.Replace(pt.Name(), "_", ".", -1)
        if amonPts, err := buildPoints(pt); err == nil {
            for fieldName, amonPt := range amonPts {
                metric := &Metric{
                    Metric: mname + strings.Replace(fieldName, "_", ".", -1),
                }
                metric.Points[0] = amonPt
                tempSeries = append(tempSeries, metric)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
        }
    }
    ts.Series = make([]*Metric, acceptablePoints)

    ts.Series = make([]*Metric, metricCounter)
    copy(ts.Series, tempSeries[0:])
    tsBytes, err := json.Marshal(ts)
    if err != nil {

@@ -123,13 +126,17 @@ func (d *Datadog) authenticatedUrl() string {
    return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
}

func buildPoint(pt *client.Point) (Point, error) {
    var p Point
    if err := p.setValue(pt.Fields()["value"]); err != nil {
        return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
func buildPoints(pt *client.Point) (map[string]Point, error) {
    pts := make(map[string]Point)
    for k, v := range pt.Fields() {
        var p Point
        if err := p.setValue(v); err != nil {
            return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
        }
        p[0] = float64(pt.Time().Unix())
        pts[k] = p
    }
    p[0] = float64(pt.Time().Unix())
    return p, nil
    return pts, nil
}

func buildTags(ptTags map[string]string) []string {
influxdb.go

@@ -7,6 +7,7 @@ import (
    "math/rand"
    "net/url"
    "strings"
    "time"

    "github.com/influxdb/influxdb/client/v2"
    "github.com/influxdb/telegraf/internal"

@@ -110,6 +111,7 @@ func (i *InfluxDB) Connect() error {
    }

    i.conns = conns
    rand.Seed(time.Now().UnixNano())
    return nil
}

librato.go

@@ -74,17 +74,21 @@ func (l *Librato) Write(points []*client.Point) error {
        return nil
    }
    metrics := Metrics{}
    var tempGauges = make([]*Gauge, len(points))
    var acceptablePoints = 0
    tempGauges := []*Gauge{}
    metricCounter := 0

    for _, pt := range points {
        if gauge, err := l.buildGauge(pt); err == nil {
            tempGauges[acceptablePoints] = gauge
            acceptablePoints += 1
        if gauges, err := l.buildGauges(pt); err == nil {
            for _, gauge := range gauges {
                tempGauges = append(tempGauges, gauge)
                metricCounter++
            }
        } else {
            log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
        }
    }
    metrics.Gauges = make([]*Gauge, acceptablePoints)

    metrics.Gauges = make([]*Gauge, metricCounter)
    copy(metrics.Gauges, tempGauges[0:])
    metricsBytes, err := json.Marshal(metrics)
    if err != nil {

@@ -118,22 +122,28 @@ func (l *Librato) Description() string {
    return "Configuration for Librato API to send metrics to."
}

func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) {
    gauge := &Gauge{
        Name:        pt.Name(),
        MeasureTime: pt.Time().Unix(),
    }
    if err := gauge.setValue(pt.Fields()["value"]); err != nil {
        return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error())
    }
    if l.SourceTag != "" {
        if source, ok := pt.Tags()[l.SourceTag]; ok {
            gauge.Source = source
        } else {
            return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag)
func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) {
    gauges := []*Gauge{}
    for fieldName, value := range pt.Fields() {
        gauge := &Gauge{
            Name:        pt.Name() + "_" + fieldName,
            MeasureTime: pt.Time().Unix(),
        }
        if err := gauge.setValue(value); err != nil {
            return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
                err.Error())
        }
        if l.SourceTag != "" {
            if source, ok := pt.Tags()[l.SourceTag]; ok {
                gauge.Source = source
            } else {
                return gauges,
                    fmt.Errorf("undeterminable Source type from Field, %s\n",
                        l.SourceTag)
            }
        }
    }
    return gauge, nil
    return gauges, nil
}

func (g *Gauge) setValue(v interface{}) error {
opentsdb.go

@@ -62,7 +62,8 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
    if len(points) == 0 {
        return nil
    }
    var timeNow = time.Now()
    now := time.Now()

    // Send Data with telnet / socket communication
    uri := fmt.Sprintf("%s:%d", o.Host, o.Port)
    tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)

@@ -70,32 +71,21 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
    if err != nil {
        return fmt.Errorf("OpenTSDB: Telnet connect fail")
    }
    defer connection.Close()

    for _, pt := range points {
        metric := &MetricLine{
            Metric:    fmt.Sprintf("%s%s", o.Prefix, pt.Name()),
            Timestamp: timeNow.Unix(),
        }

        metricValue, buildError := buildValue(pt)
        if buildError != nil {
            fmt.Printf("OpenTSDB: %s\n", buildError.Error())
            continue
        }
        metric.Value = metricValue

        tagsSlice := buildTags(pt.Tags())
        metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))

        messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
        if o.Debug {
            fmt.Print(messageLine)
        }
        _, err := connection.Write([]byte(messageLine))
        if err != nil {
            return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
        for _, metric := range buildMetrics(pt, now, o.Prefix) {
            messageLine := fmt.Sprintf("put %s %v %s %s\n",
                metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
            if o.Debug {
                fmt.Print(messageLine)
            }
            _, err := connection.Write([]byte(messageLine))
            if err != nil {
                return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
            }
        }
    }
    defer connection.Close()

    return nil
}

@@ -111,9 +101,29 @@ func buildTags(ptTags map[string]string) []string {
    return tags
}

func buildValue(pt *client.Point) (string, error) {
func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine {
    ret := []*MetricLine{}
    for fieldName, value := range pt.Fields() {
        metric := &MetricLine{
            Metric:    fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName),
            Timestamp: now.Unix(),
        }

        metricValue, buildError := buildValue(value)
        if buildError != nil {
            fmt.Printf("OpenTSDB: %s\n", buildError.Error())
            continue
        }
        metric.Value = metricValue
        tagsSlice := buildTags(pt.Tags())
        metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
        ret = append(ret, metric)
    }
    return ret
}

func buildValue(v interface{}) (string, error) {
    var retv string
    var v = pt.Fields()["value"]
    switch p := v.(type) {
    case int64:
        retv = IntToString(int64(p))
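For reference, each `MetricLine` above serializes to OpenTSDB's telnet `put` command. A small sketch that builds those lines without a network connection (the metric name, tags, and timestamp are made up for illustration):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
	"time"
)

// buildPutLine renders one OpenTSDB telnet command of the form:
//   put <metric> <unix-ts> <value> <tag1=v1 tag2=v2 ...>
func buildPutLine(metric string, ts time.Time, value string, tags map[string]string) string {
	pairs := []string{}
	for k, v := range tags {
		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
	}
	sort.Strings(pairs) // deterministic order for readability
	return fmt.Sprintf("put %s %v %s %s\n", metric, ts.Unix(), value, strings.Join(pairs, " "))
}

func main() {
	now := time.Unix(1449000000, 0)
	tags := map[string]string{"host": "server01", "cpu": "cpu0"}
	// One line per field after the v0.3.0 fan-out, e.g. cpu_usage_idle:
	fmt.Print(buildPutLine("cpu_usage_idle", now, "98.2", tags))
}
```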
riemann.go

@@ -55,8 +55,10 @@ func (r *Riemann) Write(points []*client.Point) error {

    var events []*raidman.Event
    for _, p := range points {
        ev := buildEvent(p)
        events = append(events, ev)
        evs := buildEvents(p)
        for _, ev := range evs {
            events = append(events, ev)
        }
    }

    var senderr = r.client.SendMulti(events)

@@ -68,24 +70,28 @@ func (r *Riemann) Write(points []*client.Point) error {
    return nil
}

func buildEvent(p *client.Point) *raidman.Event {
    host, ok := p.Tags()["host"]
    if !ok {
        hostname, err := os.Hostname()
        if err != nil {
            host = "unknown"
        } else {
            host = hostname
func buildEvents(p *client.Point) []*raidman.Event {
    events := []*raidman.Event{}
    for fieldName, value := range p.Fields() {
        host, ok := p.Tags()["host"]
        if !ok {
            hostname, err := os.Hostname()
            if err != nil {
                host = "unknown"
            } else {
                host = hostname
            }
        }

        event := &raidman.Event{
            Host:    host,
            Service: p.Name() + "_" + fieldName,
            Metric:  value,
        }
        events = append(events, event)
    }

    var event = &raidman.Event{
        Host:    host,
        Service: p.Name(),
        Metric:  p.Fields()["value"],
    }

    return event
    return events
}

func init() {
aerospike.go

@@ -247,26 +247,32 @@ func get(key []byte, host string) (map[string]string, error) {
    return data, err
}

func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) {
func readAerospikeStats(
    stats map[string]string,
    acc plugins.Accumulator,
    host string,
    namespace string,
) {
    fields := make(map[string]interface{})
    tags := map[string]string{
        "aerospike_host": host,
        "namespace":      "_service",
    }

    if namespace != "" {
        tags["namespace"] = namespace
    }
    for key, value := range stats {
        tags := map[string]string{
            "aerospike_host": host,
            "namespace":      "_service",
        }

        if namespace != "" {
            tags["namespace"] = namespace
        }

        // We are going to ignore all string based keys
        val, err := strconv.ParseInt(value, 10, 64)
        if err == nil {
            if strings.Contains(key, "-") {
                key = strings.Replace(key, "-", "_", -1)
            }
            acc.Add(key, val, tags)
            fields[key] = val
        }
    }
    acc.AddFields("aerospike", fields, tags)
}

func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
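This is the core v0.3.0 schema change in miniature: instead of one `acc.Add` call (and thus one measurement) per stat, the plugin collects everything into a `fields` map and emits a single `aerospike` measurement. A before/after sketch against a toy accumulator (the `Accumulator` interface here is cut down to the two calls used above; the `printer` type is illustrative):

```go
package main

import "fmt"

// Accumulator is a cut-down stand-in for telegraf's plugins.Accumulator.
type Accumulator interface {
	Add(measurement string, value interface{}, tags map[string]string)
	AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

type printer struct{}

func (printer) Add(m string, v interface{}, _ map[string]string) {
	fmt.Printf("%s value=%v\n", m, v) // v0.2.x: one measurement per stat
}

func (printer) AddFields(m string, f map[string]interface{}, _ map[string]string) {
	fmt.Printf("%s %v\n", m, f) // v0.3.0: one measurement, many fields
}

func main() {
	stats := map[string]interface{}{
		"stat_write_errs": int64(0),
		"stat_read_reqs":  int64(12345),
	}
	var acc Accumulator = printer{}

	for k, v := range stats {
		acc.Add(k, v, nil) // old schema
	}
	acc.AddFields("aerospike", stats, nil) // new schema
}
```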
aerospike_test.go

@@ -4,7 +4,6 @@ import (
    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "reflect"
    "testing"
)

@@ -31,7 +30,7 @@ func TestAerospikeStatistics(t *testing.T) {
    }

    for _, metric := range asMetrics {
        assert.True(t, acc.HasIntValue(metric), metric)
        assert.True(t, acc.HasIntField("aerospike", metric), metric)
    }

}

@@ -49,64 +48,67 @@ func TestReadAerospikeStatsNoNamespace(t *testing.T) {
        "stat_read_reqs": "12345",
    }
    readAerospikeStats(stats, &acc, "host1", "")
    for k := range stats {
        if k == "stat-write-errs" {
            k = "stat_write_errs"
        }
        assert.True(t, acc.HasMeasurement(k))
        assert.True(t, acc.CheckValue(k, int64(12345)))
    }
}

func TestReadAerospikeStatsNamespace(t *testing.T) {
    var acc testutil.Accumulator
    stats := map[string]string{
        "stat_write_errs": "12345",
        "stat_read_reqs": "12345",
    fields := map[string]interface{}{
        "stat_write_errs": int64(12345),
        "stat_read_reqs":  int64(12345),
    }
    readAerospikeStats(stats, &acc, "host1", "test")

    tags := map[string]string{
        "aerospike_host": "host1",
        "namespace": "test",
    }
    for k := range stats {
        assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
        "namespace":      "_service",
    }
    acc.AssertContainsTaggedFields(t, "aerospike", fields, tags)
}

func TestAerospikeUnmarshalList(t *testing.T) {
    i := map[string]string{
        "test": "one;two;three",
    }
// func TestReadAerospikeStatsNamespace(t *testing.T) {
// 	var acc testutil.Accumulator
// 	stats := map[string]string{
// 		"stat_write_errs": "12345",
// 		"stat_read_reqs": "12345",
// 	}
// 	readAerospikeStats(stats, &acc, "host1", "test")

    expected := []string{"one", "two", "three"}
// 	tags := map[string]string{
// 		"aerospike_host": "host1",
// 		"namespace": "test",
// 	}
// 	for k := range stats {
// 		assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
// 	}
// }

    list, err := unmarshalListInfo(i, "test2")
    assert.True(t, err != nil)
// func TestAerospikeUnmarshalList(t *testing.T) {
// 	i := map[string]string{
// 		"test": "one;two;three",
// 	}

    list, err = unmarshalListInfo(i, "test")
    assert.True(t, err == nil)
    equal := true
    for ix := range expected {
        if list[ix] != expected[ix] {
            equal = false
            break
        }
    }
    assert.True(t, equal)
}
// 	expected := []string{"one", "two", "three"}

func TestAerospikeUnmarshalMap(t *testing.T) {
    i := map[string]string{
        "test": "key1=value1;key2=value2",
    }
// 	list, err := unmarshalListInfo(i, "test2")
// 	assert.True(t, err != nil)

    expected := map[string]string{
        "key1": "value1",
        "key2": "value2",
    }
    m, err := unmarshalMapInfo(i, "test")
    assert.True(t, err == nil)
    assert.True(t, reflect.DeepEqual(m, expected))
}
// 	list, err = unmarshalListInfo(i, "test")
// 	assert.True(t, err == nil)
// 	equal := true
// 	for ix := range expected {
// 		if list[ix] != expected[ix] {
// 			equal = false
// 			break
// 		}
// 	}
// 	assert.True(t, equal)
// }

// func TestAerospikeUnmarshalMap(t *testing.T) {
// 	i := map[string]string{
// 		"test": "key1=value1;key2=value2",
// 	}

// 	expected := map[string]string{
// 		"key1": "value1",
// 		"key2": "value2",
// 	}
// 	m, err := unmarshalMapInfo(i, "test")
// 	assert.True(t, err == nil)
// 	assert.True(t, reflect.DeepEqual(m, expected))
// }
apache.go

@@ -72,32 +72,33 @@ func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
    tags := getTags(addr)

    sc := bufio.NewScanner(resp.Body)
    fields := make(map[string]interface{})
    for sc.Scan() {
        line := sc.Text()
        if strings.Contains(line, ":") {

            parts := strings.SplitN(line, ":", 2)
            key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1])

            switch key {

            case "Scoreboard":
                n.gatherScores(part, acc, tags)
                for field, value := range n.gatherScores(part) {
                    fields[field] = value
                }
            default:
                value, err := strconv.ParseFloat(part, 64)
                if err != nil {
                    continue
                }
                acc.Add(key, value, tags)
                fields[key] = value
            }
        }
    }
    acc.AddFields("apache", fields, tags)

    return nil
}

func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) {

func (n *Apache) gatherScores(data string) map[string]interface{} {
    var waiting, open int = 0, 0
    var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0

@@ -129,17 +130,20 @@ func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) {
        }
    }

    acc.Add("scboard_waiting", float64(waiting), tags)
    acc.Add("scboard_starting", float64(S), tags)
    acc.Add("scboard_reading", float64(R), tags)
    acc.Add("scboard_sending", float64(W), tags)
    acc.Add("scboard_keepalive", float64(K), tags)
    acc.Add("scboard_dnslookup", float64(D), tags)
    acc.Add("scboard_closing", float64(C), tags)
    acc.Add("scboard_logging", float64(L), tags)
    acc.Add("scboard_finishing", float64(G), tags)
    acc.Add("scboard_idle_cleanup", float64(I), tags)
    acc.Add("scboard_open", float64(open), tags)
    fields := map[string]interface{}{
        "scboard_waiting":      float64(waiting),
        "scboard_starting":     float64(S),
        "scboard_reading":      float64(R),
        "scboard_sending":      float64(W),
        "scboard_keepalive":    float64(K),
        "scboard_dnslookup":    float64(D),
        "scboard_closing":      float64(C),
        "scboard_logging":      float64(L),
        "scboard_finishing":    float64(G),
        "scboard_idle_cleanup": float64(I),
        "scboard_open":         float64(open),
    }
    return fields
}

// Get tag(s) for the apache plugin
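The scoreboard parse above tallies one counter per worker-state character in Apache's `Scoreboard:` status line. A compact sketch of the same idea, with field names following the `scboard_*` keys in the diff and the standard mod_status character meanings:

```go
package main

import "fmt"

// scoreboardFields counts Apache scoreboard characters into scboard_*
// fields: "_" waiting, "." open slot, "S" starting, "R" reading,
// "W" sending, "K" keepalive, "D" DNS lookup, "C" closing, "L" logging,
// "G" gracefully finishing, "I" idle cleanup.
func scoreboardFields(data string) map[string]interface{} {
	names := map[rune]string{
		'_': "scboard_waiting",
		'S': "scboard_starting",
		'R': "scboard_reading",
		'W': "scboard_sending",
		'K': "scboard_keepalive",
		'D': "scboard_dnslookup",
		'C': "scboard_closing",
		'L': "scboard_logging",
		'G': "scboard_finishing",
		'I': "scboard_idle_cleanup",
		'.': "scboard_open",
	}
	fields := make(map[string]interface{})
	for _, name := range names {
		fields[name] = float64(0) // zero-initialize every field
	}
	for _, c := range data {
		if name, ok := names[c]; ok {
			fields[name] = fields[name].(float64) + 1
		}
	}
	return fields
}

func main() {
	fmt.Println(scoreboardFields("WW__K..")) // 2 sending, 2 waiting, 1 keepalive, 2 open
}
```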
apache_test.go

@@ -8,7 +8,6 @@ import (

    "github.com/influxdb/telegraf/testutil"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -44,37 +43,31 @@ func TestHTTPApache(t *testing.T) {
    err := a.Gather(&acc)
    require.NoError(t, err)

    testInt := []struct {
        measurement string
        value       float64
    }{
        {"TotalAccesses", 1.29811861e+08},
        {"TotalkBytes", 5.213701865e+09},
        {"CPULoad", 6.51929},
        {"Uptime", 941553},
        {"ReqPerSec", 137.87},
        {"BytesPerSec", 5.67024e+06},
        {"BytesPerReq", 41127.4},
        {"BusyWorkers", 270},
        {"IdleWorkers", 630},
        {"ConnsTotal", 1451},
        {"ConnsAsyncWriting", 32},
        {"ConnsAsyncKeepAlive", 945},
        {"ConnsAsyncClosing", 205},
        {"scboard_waiting", 630},
        {"scboard_starting", 0},
        {"scboard_reading", 157},
        {"scboard_sending", 113},
        {"scboard_keepalive", 0},
        {"scboard_dnslookup", 0},
        {"scboard_closing", 0},
        {"scboard_logging", 0},
        {"scboard_finishing", 0},
        {"scboard_idle_cleanup", 0},
        {"scboard_open", 2850},
    }

    for _, test := range testInt {
        assert.True(t, acc.CheckValue(test.measurement, test.value))
    fields := map[string]interface{}{
        "TotalAccesses":        float64(1.29811861e+08),
        "TotalkBytes":          float64(5.213701865e+09),
        "CPULoad":              float64(6.51929),
        "Uptime":               float64(941553),
        "ReqPerSec":            float64(137.87),
        "BytesPerSec":          float64(5.67024e+06),
        "BytesPerReq":          float64(41127.4),
        "BusyWorkers":          float64(270),
        "IdleWorkers":          float64(630),
        "ConnsTotal":           float64(1451),
        "ConnsAsyncWriting":    float64(32),
        "ConnsAsyncKeepAlive":  float64(945),
        "ConnsAsyncClosing":    float64(205),
        "scboard_waiting":      float64(630),
        "scboard_starting":     float64(0),
        "scboard_reading":      float64(157),
        "scboard_sending":      float64(113),
        "scboard_keepalive":    float64(0),
        "scboard_dnslookup":    float64(0),
        "scboard_closing":      float64(0),
        "scboard_logging":      float64(0),
        "scboard_finishing":    float64(0),
        "scboard_idle_cleanup": float64(0),
        "scboard_open":         float64(2850),
    }
    acc.AssertContainsFields(t, "apache", fields)
}
bcache.go

@@ -81,7 +81,9 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
    }
    rawValue := strings.TrimSpace(string(file))
    value := prettyToBytes(rawValue)
    acc.Add("dirty_data", value, tags)

    fields := make(map[string]interface{})
    fields["dirty_data"] = value

    for _, path := range metrics {
        key := filepath.Base(path)

@@ -92,12 +94,13 @@ func (b *Bcache) gatherBcache(bdev string, acc plugins.Accumulator) error {
        }
        if key == "bypassed" {
            value := prettyToBytes(rawValue)
            acc.Add(key, value, tags)
            fields[key] = value
        } else {
            value, _ := strconv.ParseUint(rawValue, 10, 64)
            acc.Add(key, value, tags)
            fields[key] = value
        }
    }
    acc.AddFields("bcache", fields, tags)
    return nil
}

@@ -117,7 +120,7 @@ func (b *Bcache) Gather(acc plugins.Accumulator) error {
    }
    bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*")
    if len(bdevs) < 1 {
        return errors.New("Can't found any bcache device")
        return errors.New("Can't find any bcache device")
    }
    for _, bdev := range bdevs {
        if restrictDevs {
bcache_test.go

@@ -6,7 +6,6 @@ import (
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -29,11 +28,6 @@ var (
    testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10"
)

type metrics struct {
    name  string
    value uint64
}

func TestBcacheGeneratesMetrics(t *testing.T) {
    err := os.MkdirAll(testBcacheUuidPath, 0755)
    require.NoError(t, err)

@@ -53,70 +47,52 @@ func TestBcacheGeneratesMetrics(t *testing.T) {
    err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", []byte(dirty_data), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data",
        []byte(dirty_data), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed",
        []byte(bypassed), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cache_bypass_hits), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits",
        []byte(cache_bypass_hits), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cache_bypass_misses), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses",
        []byte(cache_bypass_misses), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cache_hit_ratio), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio",
        []byte(cache_hit_ratio), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", []byte(cache_hits), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits",
        []byte(cache_hits), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cache_miss_collisions), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions",
        []byte(cache_miss_collisions), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", []byte(cache_misses), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses",
        []byte(cache_misses), 0644)
    require.NoError(t, err)

    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", []byte(cache_readaheads), 0644)
    err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads",
        []byte(cache_readaheads), 0644)
    require.NoError(t, err)

    intMetrics := []*metrics{
        {
            name:  "dirty_data",
            value: 1610612736,
        },
        {
            name:  "bypassed",
            value: 5167704440832,
        },
        {
            name:  "cache_bypass_hits",
            value: 146155333,
        },
        {
            name:  "cache_bypass_misses",
            value: 0,
        },
        {
            name:  "cache_hit_ratio",
            value: 90,
        },
        {
            name:  "cache_hits",
            value: 511469583,
        },
        {
            name:  "cache_miss_collisions",
            value: 157567,
        },
        {
            name:  "cache_misses",
            value: 50616331,
        },
        {
            name:  "cache_readaheads",
            value: 2,
        },
    fields := map[string]interface{}{
        "dirty_data":            uint64(1610612736),
        "bypassed":              uint64(5167704440832),
        "cache_bypass_hits":     uint64(146155333),
        "cache_bypass_misses":   uint64(0),
        "cache_hit_ratio":       uint64(90),
        "cache_hits":            uint64(511469583),
        "cache_miss_collisions": uint64(157567),
        "cache_misses":          uint64(50616331),
        "cache_readaheads":      uint64(2),
    }

    tags := map[string]string{

@@ -126,27 +102,19 @@ func TestBcacheGeneratesMetrics(t *testing.T) {

    var acc testutil.Accumulator

    //all devs
    // all devs
    b := &Bcache{BcachePath: testBcachePath}

    err = b.Gather(&acc)
    require.NoError(t, err)
    acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

    for _, metric := range intMetrics {
        assert.True(t, acc.HasUIntValue(metric.name), metric.name)
        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
    }

    //one exist dev
    // one exist dev
    b = &Bcache{BcachePath: testBcachePath, BcacheDevs: []string{"bcache0"}}

    err = b.Gather(&acc)
    require.NoError(t, err)

    for _, metric := range intMetrics {
        assert.True(t, acc.HasUIntValue(metric.name), metric.name)
        assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
    }
    acc.AssertContainsTaggedFields(t, "bcache", fields, tags)

    err = os.RemoveAll(os.TempDir() + "/telegraf")
    require.NoError(t, err)
disque.go

@@ -155,6 +155,8 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {

    var read int

    fields := make(map[string]interface{})
    tags := map[string]string{"host": addr.String()}
    for read < sz {
        line, err := r.ReadString('\n')
        if err != nil {

@@ -176,12 +178,11 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
            continue
        }

        tags := map[string]string{"host": addr.String()}
        val := strings.TrimSpace(parts[1])

        ival, err := strconv.ParseUint(val, 10, 64)
        if err == nil {
            acc.Add(metric, ival, tags)
            fields[metric] = ival
            continue
        }

@@ -190,9 +191,9 @@ func (g *Disque) gatherServer(addr *url.URL, acc plugins.Accumulator) error {
            return err
        }

        acc.Add(metric, fval, tags)
        fields[metric] = fval
    }

    acc.AddFields("disque", fields, tags)
    return nil
}

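The disque gatherer parses Redis-style `key:value` INFO lines, trying unsigned-integer parsing first and falling back to float, which is why the tests below expect a mix of `uint64` and `float64` fields. A distilled sketch of that parsing step (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseInfoLine converts one "metric:value" INFO line into a typed field,
// preferring uint64 and falling back to float64, as the plugin does.
func parseInfoLine(line string, fields map[string]interface{}) {
	parts := strings.SplitN(line, ":", 2)
	if len(parts) != 2 {
		return
	}
	metric, val := parts[0], strings.TrimSpace(parts[1])
	if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
		fields[metric] = ival
		return
	}
	if fval, err := strconv.ParseFloat(val, 64); err == nil {
		fields[metric] = fval
	}
}

func main() {
	fields := make(map[string]interface{})
	parseInfoLine("uptime:1452705", fields)
	parseInfoLine("mem_fragmentation_ratio:1.75", fields)
	fmt.Printf("%#v\n", fields) // uptime as uint64, ratio as float64
}
```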
@ -7,7 +7,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/influxdb/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -55,42 +54,26 @@ func TestDisqueGeneratesMetrics(t *testing.T) {
|
|||
err = r.Gather(&acc)
|
||||
require.NoError(t, err)
|
||||
|
||||
checkInt := []struct {
|
||||
name string
|
||||
value uint64
|
||||
}{
|
||||
{"uptime", 1452705},
|
||||
{"clients", 31},
|
||||
{"blocked_clients", 13},
|
||||
{"used_memory", 1840104},
|
||||
{"used_memory_rss", 3227648},
|
||||
{"used_memory_peak", 89603656},
|
||||
{"total_connections_received", 5062777},
|
||||
{"total_commands_processed", 12308396},
|
||||
{"instantaneous_ops_per_sec", 18},
|
||||
{"latest_fork_usec", 1644},
|
||||
{"registered_jobs", 360},
|
||||
{"registered_queues", 12},
|
||||
}
|
||||
|
||||
for _, c := range checkInt {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
}
|
||||
|
||||
checkFloat := []struct {
|
||||
name string
|
||||
value float64
|
||||
}{
|
||||
{"mem_fragmentation_ratio", 1.75},
|
||||
{"used_cpu_sys", 19585.73},
|
||||
{"used_cpu_user", 11255.96},
|
||||
{"used_cpu_sys_children", 1.75},
|
||||
{"used_cpu_user_children", 1.91},
|
||||
}
|
||||
|
||||
for _, c := range checkFloat {
|
||||
assert.True(t, acc.CheckValue(c.name, c.value))
|
||||
fields := map[string]interface{}{
|
||||
"uptime": uint64(1452705),
|
||||
"clients": uint64(31),
|
||||
"blocked_clients": uint64(13),
|
||||
"used_memory": uint64(1840104),
|
||||
"used_memory_rss": uint64(3227648),
|
||||
"used_memory_peak": uint64(89603656),
|
||||
"total_connections_received": uint64(5062777),
|
||||
"total_commands_processed": uint64(12308396),
|
||||
"instantaneous_ops_per_sec": uint64(18),
|
||||
"latest_fork_usec": uint64(1644),
|
||||
"registered_jobs": uint64(360),
|
||||
"registered_queues": uint64(12),
|
||||
"mem_fragmentation_ratio": float64(1.75),
|
||||
"used_cpu_sys": float64(19585.73),
|
||||
"used_cpu_user": float64(11255.96),
|
||||
"used_cpu_sys_children": float64(1.75),
|
||||
"used_cpu_user_children": float64(1.91),
|
||||
}
|
||||
acc.AssertContainsFields(t, "disque", fields)
|
||||
}
|
||||
|
||||
func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
|
||||
|
@@ -137,42 +120,26 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) {
err = r.Gather(&acc)
require.NoError(t, err)

checkInt := []struct {
name string
value uint64
}{
{"uptime", 1452705},
{"clients", 31},
{"blocked_clients", 13},
{"used_memory", 1840104},
{"used_memory_rss", 3227648},
{"used_memory_peak", 89603656},
{"total_connections_received", 5062777},
{"total_commands_processed", 12308396},
{"instantaneous_ops_per_sec", 18},
{"latest_fork_usec", 1644},
{"registered_jobs", 360},
{"registered_queues", 12},
}

for _, c := range checkInt {
assert.True(t, acc.CheckValue(c.name, c.value))
}

checkFloat := []struct {
name string
value float64
}{
{"mem_fragmentation_ratio", 1.75},
{"used_cpu_sys", 19585.73},
{"used_cpu_user", 11255.96},
{"used_cpu_sys_children", 1.75},
{"used_cpu_user_children", 1.91},
}

for _, c := range checkFloat {
assert.True(t, acc.CheckValue(c.name, c.value))
fields := map[string]interface{}{
"uptime": uint64(1452705),
"clients": uint64(31),
"blocked_clients": uint64(13),
"used_memory": uint64(1840104),
"used_memory_rss": uint64(3227648),
"used_memory_peak": uint64(89603656),
"total_connections_received": uint64(5062777),
"total_commands_processed": uint64(12308396),
"instantaneous_ops_per_sec": uint64(18),
"latest_fork_usec": uint64(1644),
"registered_jobs": uint64(360),
"registered_queues": uint64(12),
"mem_fragmentation_ratio": float64(1.75),
"used_cpu_sys": float64(19585.73),
"used_cpu_user": float64(11255.96),
"used_cpu_sys_children": float64(1.75),
"used_cpu_user_children": float64(1.91),
}
acc.AssertContainsFields(t, "disque", fields)
}
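Both disque tests now build one fields map and assert it in a single call, instead of looping over per-field CheckValue assertions. A self-contained sketch of that pattern (not part of this commit; the AddFields call stands in for the plugin's Gather, and the test name is hypothetical):

```go
package disque

import (
	"testing"

	"github.com/influxdb/telegraf/testutil"
)

// TestDisqueFieldsSketch illustrates the aggregated-fields assertion
// style the tests above migrate to.
func TestDisqueFieldsSketch(t *testing.T) {
	var acc testutil.Accumulator
	// Stand-in for Gather: record fields through the same AddFields
	// call the plugins in this commit now use.
	acc.AddFields("disque", map[string]interface{}{
		"registered_jobs":   uint64(360),
		"registered_queues": uint64(12),
	}, nil)

	fields := map[string]interface{}{
		"registered_jobs":   uint64(360),
		"registered_queues": uint64(12),
	}
	// One assertion covers every field of the "disque" measurement,
	// replacing a loop of per-field acc.CheckValue calls.
	acc.AssertContainsFields(t, "disque", fields)
}
```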

const testOutput = `# Server

@@ -31,8 +31,9 @@ contains `status`, `timed_out`, `number_of_nodes`, `number_of_data_nodes`,
`initializing_shards`, `unassigned_shards` fields
- elasticsearch_cluster_health

contains `status`, `number_of_shards`, `number_of_replicas`, `active_primary_shards`,
`active_shards`, `relocating_shards`, `initializing_shards`, `unassigned_shards` fields
contains `status`, `number_of_shards`, `number_of_replicas`,
`active_primary_shards`, `active_shards`, `relocating_shards`,
`initializing_shards`, `unassigned_shards` fields
- elasticsearch_indices

#### node measurements:

@@ -316,4 +317,4 @@ Transport statistics about sent and received bytes in cluster communication meas
- elasticsearch_transport_rx_count value=6
- elasticsearch_transport_rx_size_in_bytes value=1380
- elasticsearch_transport_tx_count value=6
- elasticsearch_transport_tx_size_in_bytes value=1380
- elasticsearch_transport_tx_size_in_bytes value=1380
@@ -6,6 +6,7 @@ import (
"net/http"
"time"

"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
)

@@ -141,10 +142,14 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc plugins.Accumulator) err
"breakers": n.Breakers,
}

now := time.Now()
for p, s := range stats {
if err := e.parseInterface(acc, p, tags, s); err != nil {
f := internal.JSONFlattener{}
err := f.FlattenJSON("", s)
if err != nil {
return err
}
acc.AddFields("elasticsearch_"+p, f.Fields, tags, now)
}
}
return nil
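internal.JSONFlattener replaces the plugin-local parseInterface walker here, but its implementation is not part of this diff. A minimal sketch of the assumed behavior: nested JSON maps are walked recursively, keys are joined with underscores, and float64 leaves are collected into a flat Fields map:

```go
package main

import "fmt"

// jsonFlattener sketches the behavior assumed of internal.JSONFlattener:
// it mirrors how the removed parseInterface walked the same structure.
type jsonFlattener struct {
	Fields map[string]interface{}
}

func (f *jsonFlattener) FlattenJSON(prefix string, v interface{}) error {
	if f.Fields == nil {
		f.Fields = make(map[string]interface{})
	}
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			name := k
			if prefix != "" {
				name = prefix + "_" + k
			}
			if err := f.FlattenJSON(name, child); err != nil {
				return err
			}
		}
	case float64:
		f.Fields[prefix] = t
	case bool, string, []interface{}:
		// ignored, as parseInterface did
	default:
		return fmt.Errorf("unexpected type %T at %q", t, prefix)
	}
	return nil
}

func main() {
	f := &jsonFlattener{}
	_ = f.FlattenJSON("", map[string]interface{}{
		"mem": map[string]interface{}{"heap_used_in_bytes": 52709568.0},
	})
	fmt.Println(f.Fields) // map[mem_heap_used_in_bytes:5.2709568e+07]
}
```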
@@ -168,7 +173,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator)
"unassigned_shards": clusterStats.UnassignedShards,
}
acc.AddFields(
"cluster_health",
"elasticsearch_cluster_health",
clusterFields,
map[string]string{"name": clusterStats.ClusterName},
measurementTime,

@@ -186,7 +191,7 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc plugins.Accumulator)
"unassigned_shards": health.UnassignedShards,
}
acc.AddFields(
"indices",
"elasticsearch_indices",
indexFields,
map[string]string{"index": name},
measurementTime,

@@ -205,7 +210,8 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK)
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
r.StatusCode, http.StatusOK)
}
if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err

@@ -213,25 +219,6 @@ func (e *Elasticsearch) gatherData(url string, v interface{}) error {
return nil
}

func (e *Elasticsearch) parseInterface(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) error {
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
if err := e.parseInterface(acc, prefix+"_"+k, tags, v); err != nil {
return err
}
}
case float64:
acc.Add(prefix, t, tags)
case bool, string, []interface{}:
// ignored types
return nil
default:
return fmt.Errorf("elasticsearch: got unexpected type %T with value %v (%s)", t, t, prefix)
}
return nil
}

func init() {
plugins.Add("elasticsearch", func() plugins.Plugin {
return NewElasticsearch()
@@ -7,7 +7,7 @@ import (
"testing"

"github.com/influxdb/telegraf/testutil"
"github.com/stretchr/testify/assert"

"github.com/stretchr/testify/require"
)

@@ -52,23 +52,15 @@ func TestElasticsearch(t *testing.T) {
"node_host": "test",
}

testTables := []map[string]float64{
indicesExpected,
osExpected,
processExpected,
jvmExpected,
threadPoolExpected,
fsExpected,
transportExpected,
httpExpected,
breakersExpected,
}

for _, testTable := range testTables {
for k, v := range testTable {
assert.NoError(t, acc.ValidateTaggedValue(k, v, tags))
}
}
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
}

func TestGatherClusterStats(t *testing.T) {

@@ -80,29 +72,15 @@ func TestGatherClusterStats(t *testing.T) {
var acc testutil.Accumulator
require.NoError(t, es.Gather(&acc))

var clusterHealthTests = []struct {
measurement string
fields map[string]interface{}
tags map[string]string
}{
{
"cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"},
},
{
"indices",
v1IndexExpected,
map[string]string{"index": "v1"},
},
{
"indices",
v2IndexExpected,
map[string]string{"index": "v2"},
},
}
acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
map[string]string{"name": "elasticsearch_telegraf"})

for _, exp := range clusterHealthTests {
assert.NoError(t, acc.ValidateTaggedFields(exp.measurement, exp.fields, exp.tags))
}
acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
v1IndexExpected,
map[string]string{"index": "v1"})

acc.AssertContainsTaggedFields(t, "elasticsearch_indices",
v2IndexExpected,
map[string]string{"index": "v2"})
}
@@ -489,271 +489,271 @@ const statsResponse = `
}
`

var indicesExpected = map[string]float64{
"indices_id_cache_memory_size_in_bytes": 0,
"indices_completion_size_in_bytes": 0,
"indices_suggest_total": 0,
"indices_suggest_time_in_millis": 0,
"indices_suggest_current": 0,
"indices_query_cache_memory_size_in_bytes": 0,
"indices_query_cache_evictions": 0,
"indices_query_cache_hit_count": 0,
"indices_query_cache_miss_count": 0,
"indices_store_size_in_bytes": 37715234,
"indices_store_throttle_time_in_millis": 215,
"indices_merges_current_docs": 0,
"indices_merges_current_size_in_bytes": 0,
"indices_merges_total": 133,
"indices_merges_total_time_in_millis": 21060,
"indices_merges_total_docs": 203672,
"indices_merges_total_size_in_bytes": 142900226,
"indices_merges_current": 0,
"indices_filter_cache_memory_size_in_bytes": 7384,
"indices_filter_cache_evictions": 0,
"indices_indexing_index_total": 84790,
"indices_indexing_index_time_in_millis": 29680,
"indices_indexing_index_current": 0,
"indices_indexing_noop_update_total": 0,
"indices_indexing_throttle_time_in_millis": 0,
"indices_indexing_delete_total": 13879,
"indices_indexing_delete_time_in_millis": 1139,
"indices_indexing_delete_current": 0,
"indices_get_exists_time_in_millis": 0,
"indices_get_missing_total": 1,
"indices_get_missing_time_in_millis": 2,
"indices_get_current": 0,
"indices_get_total": 1,
"indices_get_time_in_millis": 2,
"indices_get_exists_total": 0,
"indices_refresh_total": 1076,
"indices_refresh_total_time_in_millis": 20078,
"indices_percolate_current": 0,
"indices_percolate_memory_size_in_bytes": -1,
"indices_percolate_queries": 0,
"indices_percolate_total": 0,
"indices_percolate_time_in_millis": 0,
"indices_translog_operations": 17702,
"indices_translog_size_in_bytes": 17,
"indices_recovery_current_as_source": 0,
"indices_recovery_current_as_target": 0,
"indices_recovery_throttle_time_in_millis": 0,
"indices_docs_count": 29652,
"indices_docs_deleted": 5229,
"indices_flush_total_time_in_millis": 2401,
"indices_flush_total": 115,
"indices_fielddata_memory_size_in_bytes": 12996,
"indices_fielddata_evictions": 0,
"indices_search_fetch_current": 0,
"indices_search_open_contexts": 0,
"indices_search_query_total": 1452,
"indices_search_query_time_in_millis": 5695,
"indices_search_query_current": 0,
"indices_search_fetch_total": 414,
"indices_search_fetch_time_in_millis": 146,
"indices_warmer_current": 0,
"indices_warmer_total": 2319,
"indices_warmer_total_time_in_millis": 448,
"indices_segments_count": 134,
"indices_segments_memory_in_bytes": 1285212,
"indices_segments_index_writer_memory_in_bytes": 0,
"indices_segments_index_writer_max_memory_in_bytes": 172368955,
"indices_segments_version_map_memory_in_bytes": 611844,
"indices_segments_fixed_bit_set_memory_in_bytes": 0,
var indicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),
"suggest_total": float64(0),
"suggest_time_in_millis": float64(0),
"suggest_current": float64(0),
"query_cache_memory_size_in_bytes": float64(0),
"query_cache_evictions": float64(0),
"query_cache_hit_count": float64(0),
"query_cache_miss_count": float64(0),
"store_size_in_bytes": float64(37715234),
"store_throttle_time_in_millis": float64(215),
"merges_current_docs": float64(0),
"merges_current_size_in_bytes": float64(0),
"merges_total": float64(133),
"merges_total_time_in_millis": float64(21060),
"merges_total_docs": float64(203672),
"merges_total_size_in_bytes": float64(142900226),
"merges_current": float64(0),
"filter_cache_memory_size_in_bytes": float64(7384),
"filter_cache_evictions": float64(0),
"indexing_index_total": float64(84790),
"indexing_index_time_in_millis": float64(29680),
"indexing_index_current": float64(0),
"indexing_noop_update_total": float64(0),
"indexing_throttle_time_in_millis": float64(0),
"indexing_delete_total": float64(13879),
"indexing_delete_time_in_millis": float64(1139),
"indexing_delete_current": float64(0),
"get_exists_time_in_millis": float64(0),
"get_missing_total": float64(1),
"get_missing_time_in_millis": float64(2),
"get_current": float64(0),
"get_total": float64(1),
"get_time_in_millis": float64(2),
"get_exists_total": float64(0),
"refresh_total": float64(1076),
"refresh_total_time_in_millis": float64(20078),
"percolate_current": float64(0),
"percolate_memory_size_in_bytes": float64(-1),
"percolate_queries": float64(0),
"percolate_total": float64(0),
"percolate_time_in_millis": float64(0),
"translog_operations": float64(17702),
"translog_size_in_bytes": float64(17),
"recovery_current_as_source": float64(0),
"recovery_current_as_target": float64(0),
"recovery_throttle_time_in_millis": float64(0),
"docs_count": float64(29652),
"docs_deleted": float64(5229),
"flush_total_time_in_millis": float64(2401),
"flush_total": float64(115),
"fielddata_memory_size_in_bytes": float64(12996),
"fielddata_evictions": float64(0),
"search_fetch_current": float64(0),
"search_open_contexts": float64(0),
"search_query_total": float64(1452),
"search_query_time_in_millis": float64(5695),
"search_query_current": float64(0),
"search_fetch_total": float64(414),
"search_fetch_time_in_millis": float64(146),
"warmer_current": float64(0),
"warmer_total": float64(2319),
"warmer_total_time_in_millis": float64(448),
"segments_count": float64(134),
"segments_memory_in_bytes": float64(1285212),
"segments_index_writer_memory_in_bytes": float64(0),
"segments_index_writer_max_memory_in_bytes": float64(172368955),
"segments_version_map_memory_in_bytes": float64(611844),
"segments_fixed_bit_set_memory_in_bytes": float64(0),
}

var osExpected = map[string]float64{
"os_swap_used_in_bytes": 0,
"os_swap_free_in_bytes": 487997440,
"os_timestamp": 1436460392944,
"os_mem_free_percent": 74,
"os_mem_used_percent": 25,
"os_mem_actual_free_in_bytes": 1565470720,
"os_mem_actual_used_in_bytes": 534159360,
"os_mem_free_in_bytes": 477761536,
"os_mem_used_in_bytes": 1621868544,
var osExpected = map[string]interface{}{
"swap_used_in_bytes": float64(0),
"swap_free_in_bytes": float64(487997440),
"timestamp": float64(1436460392944),
"mem_free_percent": float64(74),
"mem_used_percent": float64(25),
"mem_actual_free_in_bytes": float64(1565470720),
"mem_actual_used_in_bytes": float64(534159360),
"mem_free_in_bytes": float64(477761536),
"mem_used_in_bytes": float64(1621868544),
}

var processExpected = map[string]float64{
"process_mem_total_virtual_in_bytes": 4747890688,
"process_timestamp": 1436460392945,
"process_open_file_descriptors": 160,
"process_cpu_total_in_millis": 15480,
"process_cpu_percent": 2,
"process_cpu_sys_in_millis": 1870,
"process_cpu_user_in_millis": 13610,
var processExpected = map[string]interface{}{
"mem_total_virtual_in_bytes": float64(4747890688),
"timestamp": float64(1436460392945),
"open_file_descriptors": float64(160),
"cpu_total_in_millis": float64(15480),
"cpu_percent": float64(2),
"cpu_sys_in_millis": float64(1870),
"cpu_user_in_millis": float64(13610),
}

var jvmExpected = map[string]float64{
"jvm_timestamp": 1436460392945,
"jvm_uptime_in_millis": 202245,
"jvm_mem_non_heap_used_in_bytes": 39634576,
"jvm_mem_non_heap_committed_in_bytes": 40841216,
"jvm_mem_pools_young_max_in_bytes": 279183360,
"jvm_mem_pools_young_peak_used_in_bytes": 71630848,
"jvm_mem_pools_young_peak_max_in_bytes": 279183360,
"jvm_mem_pools_young_used_in_bytes": 32685760,
"jvm_mem_pools_survivor_peak_used_in_bytes": 8912888,
"jvm_mem_pools_survivor_peak_max_in_bytes": 34865152,
"jvm_mem_pools_survivor_used_in_bytes": 8912880,
"jvm_mem_pools_survivor_max_in_bytes": 34865152,
"jvm_mem_pools_old_peak_max_in_bytes": 724828160,
"jvm_mem_pools_old_used_in_bytes": 11110928,
"jvm_mem_pools_old_max_in_bytes": 724828160,
"jvm_mem_pools_old_peak_used_in_bytes": 14354608,
"jvm_mem_heap_used_in_bytes": 52709568,
"jvm_mem_heap_used_percent": 5,
"jvm_mem_heap_committed_in_bytes": 259522560,
"jvm_mem_heap_max_in_bytes": 1038876672,
"jvm_threads_peak_count": 45,
"jvm_threads_count": 44,
"jvm_gc_collectors_young_collection_count": 2,
"jvm_gc_collectors_young_collection_time_in_millis": 98,
"jvm_gc_collectors_old_collection_count": 1,
"jvm_gc_collectors_old_collection_time_in_millis": 24,
"jvm_buffer_pools_direct_count": 40,
"jvm_buffer_pools_direct_used_in_bytes": 6304239,
"jvm_buffer_pools_direct_total_capacity_in_bytes": 6304239,
"jvm_buffer_pools_mapped_count": 0,
"jvm_buffer_pools_mapped_used_in_bytes": 0,
"jvm_buffer_pools_mapped_total_capacity_in_bytes": 0,
var jvmExpected = map[string]interface{}{
"timestamp": float64(1436460392945),
"uptime_in_millis": float64(202245),
"mem_non_heap_used_in_bytes": float64(39634576),
"mem_non_heap_committed_in_bytes": float64(40841216),
"mem_pools_young_max_in_bytes": float64(279183360),
"mem_pools_young_peak_used_in_bytes": float64(71630848),
"mem_pools_young_peak_max_in_bytes": float64(279183360),
"mem_pools_young_used_in_bytes": float64(32685760),
"mem_pools_survivor_peak_used_in_bytes": float64(8912888),
"mem_pools_survivor_peak_max_in_bytes": float64(34865152),
"mem_pools_survivor_used_in_bytes": float64(8912880),
"mem_pools_survivor_max_in_bytes": float64(34865152),
"mem_pools_old_peak_max_in_bytes": float64(724828160),
"mem_pools_old_used_in_bytes": float64(11110928),
"mem_pools_old_max_in_bytes": float64(724828160),
"mem_pools_old_peak_used_in_bytes": float64(14354608),
"mem_heap_used_in_bytes": float64(52709568),
"mem_heap_used_percent": float64(5),
"mem_heap_committed_in_bytes": float64(259522560),
"mem_heap_max_in_bytes": float64(1038876672),
"threads_peak_count": float64(45),
"threads_count": float64(44),
"gc_collectors_young_collection_count": float64(2),
"gc_collectors_young_collection_time_in_millis": float64(98),
"gc_collectors_old_collection_count": float64(1),
"gc_collectors_old_collection_time_in_millis": float64(24),
"buffer_pools_direct_count": float64(40),
"buffer_pools_direct_used_in_bytes": float64(6304239),
"buffer_pools_direct_total_capacity_in_bytes": float64(6304239),
"buffer_pools_mapped_count": float64(0),
"buffer_pools_mapped_used_in_bytes": float64(0),
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
}

var threadPoolExpected = map[string]float64{
"thread_pool_merge_threads": 6,
"thread_pool_merge_queue": 4,
"thread_pool_merge_active": 5,
"thread_pool_merge_rejected": 2,
"thread_pool_merge_largest": 5,
"thread_pool_merge_completed": 1,
"thread_pool_bulk_threads": 4,
"thread_pool_bulk_queue": 5,
"thread_pool_bulk_active": 7,
"thread_pool_bulk_rejected": 3,
"thread_pool_bulk_largest": 1,
"thread_pool_bulk_completed": 4,
"thread_pool_warmer_threads": 2,
"thread_pool_warmer_queue": 7,
"thread_pool_warmer_active": 3,
"thread_pool_warmer_rejected": 2,
"thread_pool_warmer_largest": 3,
"thread_pool_warmer_completed": 1,
"thread_pool_get_largest": 2,
"thread_pool_get_completed": 1,
"thread_pool_get_threads": 1,
"thread_pool_get_queue": 8,
"thread_pool_get_active": 4,
"thread_pool_get_rejected": 3,
"thread_pool_index_threads": 6,
"thread_pool_index_queue": 8,
"thread_pool_index_active": 4,
"thread_pool_index_rejected": 2,
"thread_pool_index_largest": 3,
"thread_pool_index_completed": 6,
"thread_pool_suggest_threads": 2,
"thread_pool_suggest_queue": 7,
"thread_pool_suggest_active": 2,
"thread_pool_suggest_rejected": 1,
"thread_pool_suggest_largest": 8,
"thread_pool_suggest_completed": 3,
"thread_pool_fetch_shard_store_queue": 7,
"thread_pool_fetch_shard_store_active": 4,
"thread_pool_fetch_shard_store_rejected": 2,
"thread_pool_fetch_shard_store_largest": 4,
"thread_pool_fetch_shard_store_completed": 1,
"thread_pool_fetch_shard_store_threads": 1,
"thread_pool_management_threads": 2,
"thread_pool_management_queue": 3,
"thread_pool_management_active": 1,
"thread_pool_management_rejected": 6,
"thread_pool_management_largest": 2,
"thread_pool_management_completed": 22,
"thread_pool_percolate_queue": 23,
"thread_pool_percolate_active": 13,
"thread_pool_percolate_rejected": 235,
"thread_pool_percolate_largest": 23,
"thread_pool_percolate_completed": 33,
"thread_pool_percolate_threads": 123,
"thread_pool_listener_active": 4,
"thread_pool_listener_rejected": 8,
"thread_pool_listener_largest": 1,
"thread_pool_listener_completed": 1,
"thread_pool_listener_threads": 1,
"thread_pool_listener_queue": 2,
"thread_pool_search_rejected": 7,
"thread_pool_search_largest": 2,
"thread_pool_search_completed": 4,
"thread_pool_search_threads": 5,
"thread_pool_search_queue": 7,
"thread_pool_search_active": 2,
"thread_pool_fetch_shard_started_threads": 3,
"thread_pool_fetch_shard_started_queue": 1,
"thread_pool_fetch_shard_started_active": 5,
"thread_pool_fetch_shard_started_rejected": 6,
"thread_pool_fetch_shard_started_largest": 4,
"thread_pool_fetch_shard_started_completed": 54,
"thread_pool_refresh_rejected": 4,
"thread_pool_refresh_largest": 8,
"thread_pool_refresh_completed": 3,
"thread_pool_refresh_threads": 23,
"thread_pool_refresh_queue": 7,
"thread_pool_refresh_active": 3,
"thread_pool_optimize_threads": 3,
"thread_pool_optimize_queue": 4,
"thread_pool_optimize_active": 1,
"thread_pool_optimize_rejected": 2,
"thread_pool_optimize_largest": 7,
"thread_pool_optimize_completed": 3,
"thread_pool_snapshot_largest": 1,
"thread_pool_snapshot_completed": 0,
"thread_pool_snapshot_threads": 8,
"thread_pool_snapshot_queue": 5,
"thread_pool_snapshot_active": 6,
"thread_pool_snapshot_rejected": 2,
"thread_pool_generic_threads": 1,
"thread_pool_generic_queue": 4,
"thread_pool_generic_active": 6,
"thread_pool_generic_rejected": 3,
"thread_pool_generic_largest": 2,
"thread_pool_generic_completed": 27,
"thread_pool_flush_threads": 3,
"thread_pool_flush_queue": 8,
"thread_pool_flush_active": 0,
"thread_pool_flush_rejected": 1,
"thread_pool_flush_largest": 5,
"thread_pool_flush_completed": 3,
var threadPoolExpected = map[string]interface{}{
"merge_threads": float64(6),
"merge_queue": float64(4),
"merge_active": float64(5),
"merge_rejected": float64(2),
"merge_largest": float64(5),
"merge_completed": float64(1),
"bulk_threads": float64(4),
"bulk_queue": float64(5),
"bulk_active": float64(7),
"bulk_rejected": float64(3),
"bulk_largest": float64(1),
"bulk_completed": float64(4),
"warmer_threads": float64(2),
"warmer_queue": float64(7),
"warmer_active": float64(3),
"warmer_rejected": float64(2),
"warmer_largest": float64(3),
"warmer_completed": float64(1),
"get_largest": float64(2),
"get_completed": float64(1),
"get_threads": float64(1),
"get_queue": float64(8),
"get_active": float64(4),
"get_rejected": float64(3),
"index_threads": float64(6),
"index_queue": float64(8),
"index_active": float64(4),
"index_rejected": float64(2),
"index_largest": float64(3),
"index_completed": float64(6),
"suggest_threads": float64(2),
"suggest_queue": float64(7),
"suggest_active": float64(2),
"suggest_rejected": float64(1),
"suggest_largest": float64(8),
"suggest_completed": float64(3),
"fetch_shard_store_queue": float64(7),
"fetch_shard_store_active": float64(4),
"fetch_shard_store_rejected": float64(2),
"fetch_shard_store_largest": float64(4),
"fetch_shard_store_completed": float64(1),
"fetch_shard_store_threads": float64(1),
"management_threads": float64(2),
"management_queue": float64(3),
"management_active": float64(1),
"management_rejected": float64(6),
"management_largest": float64(2),
"management_completed": float64(22),
"percolate_queue": float64(23),
"percolate_active": float64(13),
"percolate_rejected": float64(235),
"percolate_largest": float64(23),
"percolate_completed": float64(33),
"percolate_threads": float64(123),
"listener_active": float64(4),
"listener_rejected": float64(8),
"listener_largest": float64(1),
"listener_completed": float64(1),
"listener_threads": float64(1),
"listener_queue": float64(2),
"search_rejected": float64(7),
"search_largest": float64(2),
"search_completed": float64(4),
"search_threads": float64(5),
"search_queue": float64(7),
"search_active": float64(2),
"fetch_shard_started_threads": float64(3),
"fetch_shard_started_queue": float64(1),
"fetch_shard_started_active": float64(5),
"fetch_shard_started_rejected": float64(6),
"fetch_shard_started_largest": float64(4),
"fetch_shard_started_completed": float64(54),
"refresh_rejected": float64(4),
"refresh_largest": float64(8),
"refresh_completed": float64(3),
"refresh_threads": float64(23),
"refresh_queue": float64(7),
"refresh_active": float64(3),
"optimize_threads": float64(3),
"optimize_queue": float64(4),
"optimize_active": float64(1),
"optimize_rejected": float64(2),
"optimize_largest": float64(7),
"optimize_completed": float64(3),
"snapshot_largest": float64(1),
"snapshot_completed": float64(0),
"snapshot_threads": float64(8),
"snapshot_queue": float64(5),
"snapshot_active": float64(6),
"snapshot_rejected": float64(2),
"generic_threads": float64(1),
"generic_queue": float64(4),
"generic_active": float64(6),
"generic_rejected": float64(3),
"generic_largest": float64(2),
"generic_completed": float64(27),
"flush_threads": float64(3),
"flush_queue": float64(8),
"flush_active": float64(0),
"flush_rejected": float64(1),
"flush_largest": float64(5),
"flush_completed": float64(3),
}

var fsExpected = map[string]float64{
"fs_timestamp": 1436460392946,
"fs_total_free_in_bytes": 16909316096,
"fs_total_available_in_bytes": 15894814720,
"fs_total_total_in_bytes": 19507089408,
var fsExpected = map[string]interface{}{
"timestamp": float64(1436460392946),
"total_free_in_bytes": float64(16909316096),
"total_available_in_bytes": float64(15894814720),
"total_total_in_bytes": float64(19507089408),
}

var transportExpected = map[string]float64{
"transport_server_open": 13,
"transport_rx_count": 6,
"transport_rx_size_in_bytes": 1380,
"transport_tx_count": 6,
"transport_tx_size_in_bytes": 1380,
var transportExpected = map[string]interface{}{
"server_open": float64(13),
"rx_count": float64(6),
"rx_size_in_bytes": float64(1380),
"tx_count": float64(6),
"tx_size_in_bytes": float64(1380),
}

var httpExpected = map[string]float64{
"http_current_open": 3,
"http_total_opened": 3,
var httpExpected = map[string]interface{}{
"current_open": float64(3),
"total_opened": float64(3),
}

var breakersExpected = map[string]float64{
"breakers_fielddata_estimated_size_in_bytes": 0,
"breakers_fielddata_overhead": 1.03,
"breakers_fielddata_tripped": 0,
"breakers_fielddata_limit_size_in_bytes": 623326003,
"breakers_request_estimated_size_in_bytes": 0,
"breakers_request_overhead": 1.0,
"breakers_request_tripped": 0,
"breakers_request_limit_size_in_bytes": 415550668,
"breakers_parent_overhead": 1.0,
"breakers_parent_tripped": 0,
"breakers_parent_limit_size_in_bytes": 727213670,
"breakers_parent_estimated_size_in_bytes": 0,
var breakersExpected = map[string]interface{}{
"fielddata_estimated_size_in_bytes": float64(0),
"fielddata_overhead": float64(1.03),
"fielddata_tripped": float64(0),
"fielddata_limit_size_in_bytes": float64(623326003),
"request_estimated_size_in_bytes": float64(0),
"request_overhead": float64(1.0),
"request_tripped": float64(0),
"request_limit_size_in_bytes": float64(415550668),
"parent_overhead": float64(1.0),
"parent_tripped": float64(0),
"parent_limit_size_in_bytes": float64(727213670),
"parent_estimated_size_in_bytes": float64(0),
}

@@ -3,59 +3,38 @@ package exec
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/gonuts/go-shellquote"
"github.com/influxdb/telegraf/plugins"
"math"
"os/exec"
"strings"
"sync"
"time"

"github.com/gonuts/go-shellquote"

"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
)

const sampleConfig = `
# specify commands via an array of tables
[[plugins.exec.commands]]
# the command to run
command = "/usr/bin/mycollector --foo=bar"

# name of the command (used as a prefix for measurements)
name = "mycollector"

# Only run this command if it has been at least this many
# seconds since it last ran
interval = 10
`

type Exec struct {
Commands []*Command
runner Runner
clock Clock
}
Command string
Name string

type Command struct {
Command string
Name string
Interval int
lastRunAt time.Time
runner Runner
}

type Runner interface {
Run(*Command) ([]byte, error)
}

type Clock interface {
Now() time.Time
Run(*Exec) ([]byte, error)
}

type CommandRunner struct{}

type RealClock struct{}

func (c CommandRunner) Run(command *Command) ([]byte, error) {
command.lastRunAt = time.Now()
split_cmd, err := shellquote.Split(command.Command)
func (c CommandRunner) Run(e *Exec) ([]byte, error) {
split_cmd, err := shellquote.Split(e.Command)
if err != nil || len(split_cmd) == 0 {
return nil, fmt.Errorf("exec: unable to parse command, %s", err)
}

@@ -65,18 +44,14 @@ func (c CommandRunner) Run(command *Command) ([]byte, error) {
cmd.Stdout = &out

if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("exec: %s for command '%s'", err, command.Command)
return nil, fmt.Errorf("exec: %s for command '%s'", err, e.Command)
}

return out.Bytes(), nil
}

func (c RealClock) Now() time.Time {
return time.Now()
}

func NewExec() *Exec {
return &Exec{runner: CommandRunner{}, clock: RealClock{}}
return &Exec{runner: CommandRunner{}}
}

func (e *Exec) SampleConfig() string {

@@ -88,73 +63,34 @@ func (e *Exec) Description() string {
}

func (e *Exec) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup

errorChannel := make(chan error, len(e.Commands))

for _, c := range e.Commands {
wg.Add(1)
go func(c *Command, acc plugins.Accumulator) {
defer wg.Done()
err := e.gatherCommand(c, acc)
if err != nil {
errorChannel <- err
}
}(c, acc)
out, err := e.runner.Run(e)
if err != nil {
return err
}

wg.Wait()
close(errorChannel)

// Get all errors and return them as one giant error
errorStrings := []string{}
for err := range errorChannel {
errorStrings = append(errorStrings, err.Error())
var jsonOut interface{}
err = json.Unmarshal(out, &jsonOut)
if err != nil {
return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s",
e.Command, err)
}

if len(errorStrings) == 0 {
return nil
f := internal.JSONFlattener{}
err = f.FlattenJSON("", jsonOut)
if err != nil {
return err
}
return errors.New(strings.Join(errorStrings, "\n"))
}

func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error {
secondsSinceLastRun := 0.0

if c.lastRunAt.Unix() == 0 { // means time is uninitialized
secondsSinceLastRun = math.Inf(1)
var msrmnt_name string
if e.Name == "" {
msrmnt_name = "exec"
} else {
secondsSinceLastRun = (e.clock.Now().Sub(c.lastRunAt)).Seconds()
}

if secondsSinceLastRun >= float64(c.Interval) {
out, err := e.runner.Run(c)
if err != nil {
return err
}

var jsonOut interface{}
err = json.Unmarshal(out, &jsonOut)
if err != nil {
return fmt.Errorf("exec: unable to parse output of '%s' as JSON, %s", c.Command, err)
}

processResponse(acc, c.Name, map[string]string{}, jsonOut)
msrmnt_name = "exec_" + e.Name
}
acc.AddFields(msrmnt_name, f.Fields, nil)
return nil
}

func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
processResponse(acc, prefix+"_"+k, tags, v)
}
case float64:
acc.Add(prefix, v, tags)
}
}

func init() {
plugins.Add("exec", func() plugins.Plugin {
return NewExec()
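The rewritten exec plugin reduces to a single pipeline: run the configured command, unmarshal its JSON output, flatten it, and emit one measurement (`exec`, or `exec_<name>`). A runnable sketch of that flow; the echoed command and the flatten helper are illustrative stand-ins, not the commit's API:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// flatten walks nested JSON maps, joining keys with "_" and keeping
// float64 leaves, mirroring the FlattenJSON usage in the diff above.
func flatten(prefix string, v interface{}, out map[string]interface{}) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			name := k
			if prefix != "" {
				name = prefix + "_" + k
			}
			flatten(name, child, out)
		}
	case float64:
		out[prefix] = t
	}
}

func main() {
	// Hypothetical command printing a JSON document, standing in for
	// the configured `command = "/usr/bin/mycollector --foo=bar"`.
	out, err := exec.Command("echo", `{"users":{"active":42}}`).Output()
	if err != nil {
		panic(err)
	}
	var jsonOut interface{}
	if err := json.Unmarshal(out, &jsonOut); err != nil {
		panic(err)
	}
	fields := make(map[string]interface{})
	flatten("", jsonOut, fields)
	// With name = "mycollector", the measurement would be "exec_mycollector".
	fmt.Println("exec_mycollector", fields) // exec_mycollector map[users_active:42]
}
```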
@@ -9,6 +9,7 @@ import (
"net/url"
"strconv"
"sync"
"time"
)

//CSV format: https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1

@@ -152,210 +153,208 @@ func (g *haproxy) gatherServer(addr string, acc plugins.Accumulator) error {
return fmt.Errorf("Unable to get valid stat result from '%s': %s", addr, err)
}

importCsvResult(res.Body, acc, u.Host)

return nil
return importCsvResult(res.Body, acc, u.Host)
}

func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]string, error) {
func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) error {
csv := csv.NewReader(r)
result, err := csv.ReadAll()
now := time.Now()

for _, row := range result {

fields := make(map[string]interface{})
tags := map[string]string{
"server": host,
"proxy": row[HF_PXNAME],
"sv": row[HF_SVNAME],
}
for field, v := range row {
tags := map[string]string{
"server": host,
"proxy": row[HF_PXNAME],
"sv": row[HF_SVNAME],
}
switch field {
case HF_QCUR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("qcur", ival, tags)
fields["qcur"] = ival
}
case HF_QMAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("qmax", ival, tags)
fields["qmax"] = ival
}
case HF_SCUR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("scur", ival, tags)
fields["scur"] = ival
}
case HF_SMAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("smax", ival, tags)
fields["smax"] = ival
}
case HF_STOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("stot", ival, tags)
fields["stot"] = ival
}
case HF_BIN:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("bin", ival, tags)
fields["bin"] = ival
}
case HF_BOUT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("bout", ival, tags)
fields["bout"] = ival
}
case HF_DREQ:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("dreq", ival, tags)
fields["dreq"] = ival
}
case HF_DRESP:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("dresp", ival, tags)
fields["dresp"] = ival
}
case HF_EREQ:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("ereq", ival, tags)
fields["ereq"] = ival
}
case HF_ECON:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("econ", ival, tags)
fields["econ"] = ival
}
case HF_ERESP:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("eresp", ival, tags)
fields["eresp"] = ival
}
case HF_WRETR:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("wretr", ival, tags)
fields["wretr"] = ival
}
case HF_WREDIS:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("wredis", ival, tags)
fields["wredis"] = ival
}
case HF_ACT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("active_servers", ival, tags)
fields["active_servers"] = ival
}
case HF_BCK:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("backup_servers", ival, tags)
fields["backup_servers"] = ival
}
case HF_DOWNTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("downtime", ival, tags)
fields["downtime"] = ival
}
case HF_THROTTLE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("throttle", ival, tags)
fields["throttle"] = ival
}
case HF_LBTOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("lbtot", ival, tags)
fields["lbtot"] = ival
}
case HF_RATE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("rate", ival, tags)
fields["rate"] = ival
}
case HF_RATE_MAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("rate_max", ival, tags)
fields["rate_max"] = ival
}
case HF_CHECK_DURATION:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("check_duration", ival, tags)
fields["check_duration"] = ival
}
case HF_HRSP_1xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("http_response.1xx", ival, tags)
fields["http_response.1xx"] = ival
}
case HF_HRSP_2xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("http_response.2xx", ival, tags)
fields["http_response.2xx"] = ival
}
case HF_HRSP_3xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("http_response.3xx", ival, tags)
fields["http_response.3xx"] = ival
}
case HF_HRSP_4xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("http_response.4xx", ival, tags)
fields["http_response.4xx"] = ival
}
case HF_HRSP_5xx:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("http_response.5xx", ival, tags)
fields["http_response.5xx"] = ival
}
case HF_REQ_RATE:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("req_rate", ival, tags)
fields["req_rate"] = ival
}
case HF_REQ_RATE_MAX:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("req_rate_max", ival, tags)
fields["req_rate_max"] = ival
}
case HF_REQ_TOT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("req_tot", ival, tags)
fields["req_tot"] = ival
}
case HF_CLI_ABRT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("cli_abort", ival, tags)
fields["cli_abort"] = ival
}
case HF_SRV_ABRT:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("srv_abort", ival, tags)
fields["srv_abort"] = ival
}
case HF_QTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("qtime", ival, tags)
fields["qtime"] = ival
}
case HF_CTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("ctime", ival, tags)
fields["ctime"] = ival
}
case HF_RTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("rtime", ival, tags)
fields["rtime"] = ival
}
case HF_TTIME:
ival, err := strconv.ParseUint(v, 10, 64)
if err == nil {
acc.Add("ttime", ival, tags)
fields["ttime"] = ival
}

}

}
acc.AddFields("haproxy", fields, tags, now)
}
return result, err
return err
}

func init() {
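The haproxy change applies the same aggregation: each CSV row collects its parsed columns into a fields map and emits them with one AddFields("haproxy", ...) call instead of one acc.Add per column. A reduced sketch with two columns; the column indices are illustrative constants, not the plugin's real HF_* values:

```go
package main

import (
	"fmt"
	"strconv"
)

// Illustrative column positions; the real plugin uses HF_QCUR, HF_QMAX, etc.
const (
	colQcur = 2
	colQmax = 3
)

// gatherRow mirrors the per-row loop in importCsvResult: parse each
// numeric column once and collect it into a single fields map.
func gatherRow(row []string) map[string]interface{} {
	fields := make(map[string]interface{})
	for i, v := range row {
		ival, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			continue // non-numeric columns are skipped, as in the diff
		}
		switch i {
		case colQcur:
			fields["qcur"] = ival
		case colQmax:
			fields["qmax"] = ival
		}
	}
	return fields
}

func main() {
	row := []string{"px", "sv", "17", "64"}
	// One AddFields("haproxy", fields, tags, now) per row replaces
	// dozens of individual acc.Add calls.
	fmt.Println(gatherRow(row)) // map[qcur:17 qmax:64]
}
```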
@@ -10,20 +10,17 @@ import (
"strings"
"sync"

"github.com/influxdb/telegraf/internal"
"github.com/influxdb/telegraf/plugins"
)

type HttpJson struct {
Services []Service
client HTTPClient
}

type Service struct {
Name string
Servers []string
Method string
TagKeys []string
Parameters map[string]string
client HTTPClient
}

type HTTPClient interface {

@@ -47,31 +44,28 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) {
}

var sampleConfig = `
# Specify services via an array of tables
[[plugins.httpjson.services]]
# a name for the service being polled
name = "webserver_stats"

# a name for the service being polled
name = "webserver_stats"
# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]

# URL of each server in the service's cluster
servers = [
"http://localhost:9999/stats/",
"http://localhost:9998/stats/",
]
# HTTP method to use (case-sensitive)
method = "GET"

# HTTP method to use (case-sensitive)
method = "GET"
# List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]

# List of tag names to extract from top-level of JSON server response
# tag_keys = [
# "my_tag_1",
# "my_tag_2"
# ]

# HTTP parameters (all values must be strings)
[plugins.httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
# HTTP parameters (all values must be strings)
[plugins.httpjson.parameters]
event_type = "cpu_spike"
threshold = "0.75"
`

func (h *HttpJson) SampleConfig() string {

@@ -86,22 +80,16 @@ func (h *HttpJson) Description() string {
func (h *HttpJson) Gather(acc plugins.Accumulator) error {
var wg sync.WaitGroup

totalServers := 0
for _, service := range h.Services {
totalServers += len(service.Servers)
}
errorChannel := make(chan error, totalServers)
errorChannel := make(chan error, len(h.Servers))

for _, service := range h.Services {
for _, server := range service.Servers {
wg.Add(1)
go func(service Service, server string) {
defer wg.Done()
if err := h.gatherServer(acc, service, server); err != nil {
errorChannel <- err
}
}(service, server)
}
for _, server := range h.Servers {
wg.Add(1)
go func(server string) {
defer wg.Done()
if err := h.gatherServer(acc, server); err != nil {
errorChannel <- err
}
}(server)
}

wg.Wait()

@@ -129,10 +117,9 @@ func (h *HttpJson) Gather(acc plugins.Accumulator) error {
// error: Any error that may have occurred
func (h *HttpJson) gatherServer(
acc plugins.Accumulator,
service Service,
serverURL string,
) error {
resp, err := h.sendRequest(service, serverURL)
resp, err := h.sendRequest(serverURL)
if err != nil {
return err
}

@@ -146,7 +133,7 @@ func (h *HttpJson) gatherServer(
"server": serverURL,
}

for _, tag := range service.TagKeys {
for _, tag := range h.TagKeys {
switch v := jsonOut[tag].(type) {
case string:
tags[tag] = v

@@ -154,7 +141,19 @@ func (h *HttpJson) gatherServer(
delete(jsonOut, tag)
}

processResponse(acc, service.Name, tags, jsonOut)
f := internal.JSONFlattener{}
err = f.FlattenJSON("", jsonOut)
if err != nil {
return err
}

var msrmnt_name string
if h.Name == "" {
msrmnt_name = "httpjson"
} else {
msrmnt_name = "httpjson_" + h.Name
}
acc.AddFields(msrmnt_name, f.Fields, nil)
return nil
}

@@ -165,7 +164,7 @@ func (h *HttpJson) gatherServer(
// Returns:
// string: body of the response
// error : Any error that may have occurred
func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error) {
func (h *HttpJson) sendRequest(serverURL string) (string, error) {
// Prepare URL
requestURL, err := url.Parse(serverURL)
if err != nil {

@@ -173,13 +172,13 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error
}

params := url.Values{}
for k, v := range service.Parameters {
for k, v := range h.Parameters {
params.Add(k, v)
}
requestURL.RawQuery = params.Encode()

// Create + send request
req, err := http.NewRequest(service.Method, requestURL.String(), nil)
req, err := http.NewRequest(h.Method, requestURL.String(), nil)
if err != nil {
return "", err
}

@@ -188,6 +187,7 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error
if err != nil {
return "", err
}
defer resp.Body.Close()

defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)

@@ -209,23 +209,6 @@ func (h *HttpJson) sendRequest(service Service, serverURL string) (string, error
return string(body), err
}

// Flattens the map generated from the JSON object and stores its float values using a
// plugins.Accumulator. It ignores any non-float values.
// Parameters:
// acc: the Accumulator to use
// prefix: What the name of the measurement name should be prefixed by.
// tags: telegraf tags to
func processResponse(acc plugins.Accumulator, prefix string, tags map[string]string, v interface{}) {
switch t := v.(type) {
case map[string]interface{}:
for k, v := range t {
processResponse(acc, prefix+"_"+k, tags, v)
}
case float64:
acc.Add(prefix, v, tags)
}
}

func init() {
plugins.Add("httpjson", func() plugins.Plugin {
return &HttpJson{client: RealHTTPClient{client: &http.Client{}}}
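Besides flattening, httpjson promotes selected top-level JSON strings to tags before the body is flattened. A sketch of that TagKeys step, assuming a decoded top-level map as in gatherServer above:

```go
package main

import "fmt"

// extractTags mirrors the TagKeys handling in gatherServer: string values
// named in tagKeys are promoted from fields to tags and removed from the
// JSON map before it is flattened.
func extractTags(jsonOut map[string]interface{}, tagKeys []string, server string) map[string]string {
	tags := map[string]string{"server": server}
	for _, tag := range tagKeys {
		if v, ok := jsonOut[tag].(string); ok {
			tags[tag] = v
			delete(jsonOut, tag)
		}
	}
	return tags
}

func main() {
	body := map[string]interface{}{
		"role":    "master",
		"latency": 12.5,
	}
	tags := extractTags(body, []string{"role"}, "http://localhost:9999/stats/")
	// "role" becomes a tag; "latency" stays behind to be flattened as a field.
	fmt.Println(tags, body)
}
```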
@@ -7,7 +7,6 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"

"github.com/influxdb/telegraf/plugins"
)

@@ -23,8 +22,6 @@ type Server struct {
type Metric struct {
Name string
Jmx string
Pass []string
Drop []string
}

type JolokiaClient interface {

@@ -44,7 +41,6 @@ type Jolokia struct {
Context string
Servers []Server
Metrics []Metric
Tags map[string]string
}

func (j *Jolokia) SampleConfig() string {

@@ -52,10 +48,6 @@ func (j *Jolokia) SampleConfig() string {
# This is the context root used to compose the jolokia url
context = "/jolokia/read"

# Tags added to each measurements
[jolokia.tags]
group = "as"

# List of servers exposing jolokia read service
[[plugins.jolokia.servers]]
name = "stable"

@@ -70,23 +62,6 @@ func (j *Jolokia) SampleConfig() string {
[[plugins.jolokia.metrics]]
name = "heap_memory_usage"
jmx = "/java.lang:type=Memory/HeapMemoryUsage"


# This drops the 'committed' value from Eden space measurement
[[plugins.jolokia.metrics]]
name = "memory_eden"
jmx = "/java.lang:type=MemoryPool,name=PS Eden Space/Usage"
drop = [ "committed" ]


# This passes only DaemonThreadCount and ThreadCount
[[plugins.jolokia.metrics]]
name = "heap_threads"
jmx = "/java.lang:type=Threading"
pass = [
"DaemonThreadCount",
"ThreadCount"
]
`
}

@@ -100,12 +75,9 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
if err != nil {
return nil, err
}
defer req.Body.Close()

resp, err := j.jClient.MakeRequest(req)
if err != nil {
return nil, err
}

if err != nil {
return nil, err
}

@@ -137,65 +109,22 @@ func (j *Jolokia) getAttr(requestUrl *url.URL) (map[string]interface{}, error) {
return jsonOut, nil
}

func (m *Metric) shouldPass(field string) bool {

if m.Pass != nil {

for _, pass := range m.Pass {
if strings.HasPrefix(field, pass) {
return true
}
}

return false
}

if m.Drop != nil {

for _, drop := range m.Drop {
if strings.HasPrefix(field, drop) {
return false
}
}

return true
}

return true
}

func (m *Metric) filterFields(fields map[string]interface{}) map[string]interface{} {

for field, _ := range fields {
if !m.shouldPass(field) {
delete(fields, field)
}
}

return fields
}

func (j *Jolokia) Gather(acc plugins.Accumulator) error {

context := j.Context //"/jolokia/read"
servers := j.Servers
metrics := j.Metrics
tags := j.Tags

if tags == nil {
tags = map[string]string{}
}
tags := make(map[string]string)

for _, server := range servers {
tags["server"] = server.Name
tags["port"] = server.Port
tags["host"] = server.Host
fields := make(map[string]interface{})
for _, metric := range metrics {

measurement := metric.Name
jmxPath := metric.Jmx

tags["server"] = server.Name
tags["port"] = server.Port
tags["host"] = server.Host

// Prepare URL
requestUrl, err := url.Parse("http://" + server.Host + ":" +
server.Port + context + jmxPath)

@@ -209,16 +138,20 @@ func (j *Jolokia) Gather(acc plugins.Accumulator) error {
out, _ := j.getAttr(requestUrl)

if values, ok := out["value"]; ok {
switch values.(type) {
switch t := values.(type) {
case map[string]interface{}:
acc.AddFields(measurement, metric.filterFields(values.(map[string]interface{})), tags)
for k, v := range t {
fields[measurement+"_"+k] = v
}
case interface{}:
acc.Add(measurement, values.(interface{}), tags)
fields[measurement] = t
}
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n", requestUrl.String())
fmt.Printf("Missing key 'value' in '%s' output response\n",
requestUrl.String())
}
}
acc.AddFields("jolokia", fields, tags)
}

return nil
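The jolokia rewrite folds every configured metric into one fields map, keyed `<metric name>_<attribute>`, and emits a single `jolokia` measurement per server. A sketch of that aggregation with made-up payloads:

```go
package main

import "fmt"

func main() {
	// Hypothetical decoded "value" payloads for two configured metrics.
	responses := map[string]interface{}{
		"heap_memory_usage": map[string]interface{}{
			"used": 52709568.0, "max": 1038876672.0,
		},
		"thread_count": 44.0,
	}

	// Fold each metric's payload into one fields map, as the Gather loop
	// above does, then emit everything in a single AddFields("jolokia", ...).
	fields := make(map[string]interface{})
	for measurement, values := range responses {
		switch t := values.(type) {
		case map[string]interface{}:
			for k, v := range t {
				fields[measurement+"_"+k] = v
			}
		default:
			fields[measurement] = t
		}
	}
	fmt.Println(fields)
	// map[heap_memory_usage_max:1.038876672e+09 heap_memory_usage_used:5.2709568e+07 thread_count:44]
}
```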
@@ -197,6 +197,8 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins
"node": nodeNameTrimmed,
}
i := 0

fields := make(map[string]interface{})
for scanner.Scan() {
key := KeyMapping[serverType][i]
val, err := retrieveTokenAfterColon(scanner.Text())

@@ -207,9 +209,10 @@ func (l *LeoFS) gatherServer(endpoint string, serverType ServerType, acc plugins
if err != nil {
return fmt.Errorf("Unable to parse the value:%s, err:%s", val, err)
}
acc.Add(key, fVal, tags)
fields[key] = fVal
i++
}
acc.AddFields("leofs", fields, tags)
return nil
}

@@ -149,19 +149,19 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
return err
}

fields := make(map[string]interface{})
for _, line := range lines {
fields := strings.Fields(line)

parts := strings.Fields(line)
for _, wanted := range wanted_fields {
var data uint64
if fields[0] == wanted.inProc {
if parts[0] == wanted.inProc {
wanted_field := wanted.field
// if not set, assume field[1]. Shouldn't be field[0], as
// that's a string
if wanted_field == 0 {
wanted_field = 1
}
data, err = strconv.ParseUint((fields[wanted_field]), 10, 64)
data, err = strconv.ParseUint((parts[wanted_field]), 10, 64)
if err != nil {
return err
}

@@ -169,11 +169,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping,
if wanted.reportAs != "" {
report_name = wanted.reportAs
}
acc.Add(report_name, data, tags)

fields[report_name] = data
}
}
}
acc.AddFields("lustre2", fields, tags)
}
return nil
}
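Worth noting in the lustre2 hunk above: the old code named the whitespace-split line `fields`, which inside the loop would shadow the new `fields` map, so the split result is renamed to `parts`. A minimal illustration of the hazard:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fields := make(map[string]interface{}) // accumulated per-target fields
	for _, line := range []string{"read_bytes 1024"} {
		// If this slice were also named `fields`, it would shadow the map
		// above inside the loop and the map write below would fail to
		// compile. Renaming the split result to `parts` avoids that.
		parts := strings.Fields(line)
		fields[parts[0]] = parts[1]
	}
	fmt.Println(fields) // map[read_bytes:1024]
}
```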
@@ -75,35 +75,38 @@ func gatherReport(acc plugins.Accumulator, report Report, now time.Time) {
	tags := make(map[string]string)
	tags["id"] = report.ID
	tags["campaign_title"] = report.CampaignTitle
	acc.Add("emails_sent", report.EmailsSent, tags, now)
	acc.Add("abuse_reports", report.AbuseReports, tags, now)
	acc.Add("unsubscribed", report.Unsubscribed, tags, now)
	acc.Add("hard_bounces", report.Bounces.HardBounces, tags, now)
	acc.Add("soft_bounces", report.Bounces.SoftBounces, tags, now)
	acc.Add("syntax_errors", report.Bounces.SyntaxErrors, tags, now)
	acc.Add("forwards_count", report.Forwards.ForwardsCount, tags, now)
	acc.Add("forwards_opens", report.Forwards.ForwardsOpens, tags, now)
	acc.Add("opens_total", report.Opens.OpensTotal, tags, now)
	acc.Add("unique_opens", report.Opens.UniqueOpens, tags, now)
	acc.Add("open_rate", report.Opens.OpenRate, tags, now)
	acc.Add("clicks_total", report.Clicks.ClicksTotal, tags, now)
	acc.Add("unique_clicks", report.Clicks.UniqueClicks, tags, now)
	acc.Add("unique_subscriber_clicks", report.Clicks.UniqueSubscriberClicks, tags, now)
	acc.Add("click_rate", report.Clicks.ClickRate, tags, now)
	acc.Add("facebook_recipient_likes", report.FacebookLikes.RecipientLikes, tags, now)
	acc.Add("facebook_unique_likes", report.FacebookLikes.UniqueLikes, tags, now)
	acc.Add("facebook_likes", report.FacebookLikes.FacebookLikes, tags, now)
	acc.Add("industry_type", report.IndustryStats.Type, tags, now)
	acc.Add("industry_open_rate", report.IndustryStats.OpenRate, tags, now)
	acc.Add("industry_click_rate", report.IndustryStats.ClickRate, tags, now)
	acc.Add("industry_bounce_rate", report.IndustryStats.BounceRate, tags, now)
	acc.Add("industry_unopen_rate", report.IndustryStats.UnopenRate, tags, now)
	acc.Add("industry_unsub_rate", report.IndustryStats.UnsubRate, tags, now)
	acc.Add("industry_abuse_rate", report.IndustryStats.AbuseRate, tags, now)
	acc.Add("list_stats_sub_rate", report.ListStats.SubRate, tags, now)
	acc.Add("list_stats_unsub_rate", report.ListStats.UnsubRate, tags, now)
	acc.Add("list_stats_open_rate", report.ListStats.OpenRate, tags, now)
	acc.Add("list_stats_click_rate", report.ListStats.ClickRate, tags, now)
	fields := map[string]interface{}{
		"emails_sent":              report.EmailsSent,
		"abuse_reports":            report.AbuseReports,
		"unsubscribed":             report.Unsubscribed,
		"hard_bounces":             report.Bounces.HardBounces,
		"soft_bounces":             report.Bounces.SoftBounces,
		"syntax_errors":            report.Bounces.SyntaxErrors,
		"forwards_count":           report.Forwards.ForwardsCount,
		"forwards_opens":           report.Forwards.ForwardsOpens,
		"opens_total":              report.Opens.OpensTotal,
		"unique_opens":             report.Opens.UniqueOpens,
		"open_rate":                report.Opens.OpenRate,
		"clicks_total":             report.Clicks.ClicksTotal,
		"unique_clicks":            report.Clicks.UniqueClicks,
		"unique_subscriber_clicks": report.Clicks.UniqueSubscriberClicks,
		"click_rate":               report.Clicks.ClickRate,
		"facebook_recipient_likes": report.FacebookLikes.RecipientLikes,
		"facebook_unique_likes":    report.FacebookLikes.UniqueLikes,
		"facebook_likes":           report.FacebookLikes.FacebookLikes,
		"industry_type":            report.IndustryStats.Type,
		"industry_open_rate":       report.IndustryStats.OpenRate,
		"industry_click_rate":      report.IndustryStats.ClickRate,
		"industry_bounce_rate":     report.IndustryStats.BounceRate,
		"industry_unopen_rate":     report.IndustryStats.UnopenRate,
		"industry_unsub_rate":      report.IndustryStats.UnsubRate,
		"industry_abuse_rate":      report.IndustryStats.AbuseRate,
		"list_stats_sub_rate":      report.ListStats.SubRate,
		"list_stats_unsub_rate":    report.ListStats.UnsubRate,
		"list_stats_open_rate":     report.ListStats.OpenRate,
		"list_stats_click_rate":    report.ListStats.ClickRate,
	}
	acc.AddFields("mailchimp", fields, tags, now)
}

func init() {

@@ -137,16 +137,18 @@ func (m *Memcached) gatherServer(
	tags := map[string]string{"server": address}

	// Process values
	fields := make(map[string]interface{})
	for _, key := range sendMetrics {
		if value, ok := values[key]; ok {
			// Mostly it is the number
			if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse != nil {
				acc.Add(key, value, tags)
				fields[key] = value
			} else {
				acc.Add(key, iValue, tags)
				fields[key] = iValue
			}
		}
	}
	acc.AddFields("memcached", fields, tags)
	return nil
}

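The step above keeps the old semantics under the new fields map: a stat that parses as an integer is stored as one, anything else (version strings and the like) is kept as its raw string. A standalone sketch of that parse-or-fallback step, with an illustrative stats map:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	fields := make(map[string]interface{})
	stats := map[string]string{"curr_items": "42", "version": "1.4.25"} // illustrative values
	for key, value := range stats {
		if iValue, errParse := strconv.ParseInt(value, 10, 64); errParse == nil {
			fields[key] = iValue // numeric stat
		} else {
			fields[key] = value // non-numeric stat, e.g. a version string
		}
	}
	fmt.Println(fields) // map[curr_items:42 version:1.4.25]
}
```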
@@ -98,7 +98,8 @@ func (m *MongoDB) gatherServer(server *Server, acc plugins.Accumulator) error {
	}
	dialInfo, err := mgo.ParseURL(dialAddrs[0])
	if err != nil {
		return fmt.Errorf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error())
		return fmt.Errorf("Unable to parse URL (%s), %s\n",
			dialAddrs[0], err.Error())
	}
	dialInfo.Direct = true
	dialInfo.Timeout = time.Duration(10) * time.Second

@@ -10,6 +10,7 @@ import (

type MongodbData struct {
	StatLine *StatLine
	Fields   map[string]interface{}
	Tags     map[string]string
}

@@ -20,6 +21,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
	return &MongodbData{
		StatLine: statLine,
		Tags:     tags,
		Fields:   make(map[string]interface{}),
	}
}

@@ -63,38 +65,44 @@ var WiredTigerStats = map[string]string{
	"percent_cache_used": "CacheUsedPercent",
}

func (d *MongodbData) AddDefaultStats(acc plugins.Accumulator) {
func (d *MongodbData) AddDefaultStats() {
	statLine := reflect.ValueOf(d.StatLine).Elem()
	d.addStat(acc, statLine, DefaultStats)
	d.addStat(statLine, DefaultStats)
	if d.StatLine.NodeType != "" {
		d.addStat(acc, statLine, DefaultReplStats)
		d.addStat(statLine, DefaultReplStats)
	}
	if d.StatLine.StorageEngine == "mmapv1" {
		d.addStat(acc, statLine, MmapStats)
		d.addStat(statLine, MmapStats)
	} else if d.StatLine.StorageEngine == "wiredTiger" {
		for key, value := range WiredTigerStats {
			val := statLine.FieldByName(value).Interface()
			percentVal := fmt.Sprintf("%.1f", val.(float64)*100)
			floatVal, _ := strconv.ParseFloat(percentVal, 64)
			d.add(acc, key, floatVal)
			d.add(key, floatVal)
		}
	}
}

func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, stats map[string]string) {
func (d *MongodbData) addStat(
	statLine reflect.Value,
	stats map[string]string,
) {
	for key, value := range stats {
		val := statLine.FieldByName(value).Interface()
		d.add(acc, key, val)
		d.add(key, val)
	}
}

func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) {
func (d *MongodbData) add(key string, val interface{}) {
	d.Fields[key] = val
}

func (d *MongodbData) flush(acc plugins.Accumulator) {
	acc.AddFields(
		key,
		map[string]interface{}{
			"value": val,
		},
		"mongodb",
		d.Fields,
		d.Tags,
		d.StatLine.Time,
	)
	d.Fields = make(map[string]interface{})
}

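The MongodbData change splits collection from emission: `add` only writes into the `Fields` map, and a new `flush` emits the whole map as one `mongodb` measurement and resets it. A small self-contained sketch of that collect-then-flush shape (the `addFields` function is a hypothetical stand-in for `plugins.Accumulator.AddFields`, and the field names are illustrative):

```go
package main

import "fmt"

// addFields stands in for plugins.Accumulator.AddFields so the sketch
// runs on its own.
func addFields(measurement string, fields map[string]interface{}, tags map[string]string) {
	fmt.Println(measurement, fields, tags)
}

type mongoData struct {
	fields map[string]interface{}
	tags   map[string]string
}

func (d *mongoData) add(key string, val interface{}) { d.fields[key] = val }

func (d *mongoData) flush() {
	addFields("mongodb", d.fields, d.tags)
	d.fields = make(map[string]interface{}) // reset so the next gather starts clean
}

func main() {
	d := &mongoData{
		fields: make(map[string]interface{}),
		tags:   map[string]string{"host": "db1"},
	}
	d.add("inserts_per_sec", 12)
	d.add("queries_per_sec", 34)
	d.flush() // mongodb map[inserts_per_sec:12 queries_per_sec:34] map[host:db1]
}
```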
@@ -44,7 +44,8 @@ func (s *Server) gatherData(acc plugins.Accumulator) error {
		NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds),
		s.getDefaultTags(),
	)
	data.AddDefaultStats(acc)
	data.AddDefaultStats()
	data.flush(acc)
	}
	return nil
}

@@ -138,6 +138,8 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
	if err != nil {
		servtag = "localhost"
	}
	tags := map[string]string{"server": servtag}
	fields := make(map[string]interface{})
	for rows.Next() {
		var name string
		var val interface{}
@@ -149,12 +151,10 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {

		var found bool

		tags := map[string]string{"server": servtag}

		for _, mapped := range mappings {
			if strings.HasPrefix(name, mapped.onServer) {
				i, _ := strconv.Atoi(string(val.([]byte)))
				acc.Add(mapped.inExport+name[len(mapped.onServer):], i, tags)
				fields[mapped.inExport+name[len(mapped.onServer):]] = i
				found = true
			}
		}
@@ -170,16 +170,17 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
				return err
			}

			acc.Add("queries", i, tags)
			fields["queries"] = i
		case "Slow_queries":
			i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
			if err != nil {
				return err
			}

			acc.Add("slow_queries", i, tags)
			fields["slow_queries"] = i
		}
	}
	acc.AddFields("mysql", fields, tags)

	conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")

@@ -193,11 +194,13 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
		}

		tags := map[string]string{"server": servtag, "user": user}
		fields := make(map[string]interface{})

		if err != nil {
			return err
		}
		acc.Add("connections", connections, tags)
		fields["connections"] = connections
		acc.AddFields("mysql_users", fields, tags)
	}

	return nil

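The `mappings` loop above translates a server-side variable prefix into an export prefix and stores the value as a field. A standalone sketch of that translation step, with two illustrative mappings rather than the plugin's full table:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// mapping mirrors the shape used in the loop above; the entries in main
// are illustrative examples, not the plugin's actual mapping table.
type mapping struct{ onServer, inExport string }

func main() {
	mappings := []mapping{
		{onServer: "Bytes_", inExport: "bytes_"},
		{onServer: "Com_", inExport: "commands_"},
	}
	fields := make(map[string]interface{})
	name, val := "Com_select", "101" // one SHOW STATUS row, illustrative
	for _, mapped := range mappings {
		if strings.HasPrefix(name, mapped.onServer) {
			i, _ := strconv.Atoi(val)
			fields[mapped.inExport+name[len(mapped.onServer):]] = i
		}
	}
	fmt.Println(fields) // map[commands_select:101]
}
```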
@@ -127,14 +127,16 @@ func (n *Nginx) gatherUrl(addr *url.URL, acc plugins.Accumulator) error {
	}

	tags := getTags(addr)

	acc.Add("active", active, tags)
	acc.Add("accepts", accepts, tags)
	acc.Add("handled", handled, tags)
	acc.Add("requests", requests, tags)
	acc.Add("reading", reading, tags)
	acc.Add("writing", writing, tags)
	acc.Add("waiting", waiting, tags)
	fields := map[string]interface{}{
		"active":   active,
		"accepts":  accepts,
		"handled":  handled,
		"requests": requests,
		"reading":  reading,
		"writing":  writing,
		"waiting":  waiting,
	}
	acc.AddFields("nginx", fields, tags)

	return nil
}

@@ -198,9 +198,11 @@ func importMetric(r io.Reader, acc plugins.Accumulator, host string) (poolStat,
			"url":  host,
			"pool": pool,
		}
		fields := make(map[string]interface{})
		for k, v := range stats[pool] {
			acc.Add(strings.Replace(k, " ", "_", -1), v, tags)
			fields[strings.Replace(k, " ", "_", -1)] = v
		}
		acc.AddFields("phpfpm", fields, tags)
	}

	return stats, nil

@@ -82,10 +82,13 @@ func (p *Ping) Gather(acc plugins.Accumulator) error {
		}
		// Calculate packet loss percentage
		loss := float64(trans-rec) / float64(trans) * 100.0
		acc.Add("packets_transmitted", trans, tags)
		acc.Add("packets_received", rec, tags)
		acc.Add("percent_packet_loss", loss, tags)
		acc.Add("average_response_ms", avg, tags)
		fields := map[string]interface{}{
			"packets_transmitted": trans,
			"packets_received":    rec,
			"percent_packet_loss": loss,
			"average_response_ms": avg,
		}
		acc.AddFields("ping", fields, tags)
	}(url, acc)
	}

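The packet-loss line above is a straight ratio of lost to transmitted packets, scaled to a percentage. A tiny worked example with assumed counts of 5 sent and 4 received:

```go
package main

import "fmt"

func main() {
	// 5 transmitted, 4 received: 1 lost out of 5 is 20 percent.
	trans, rec := 5, 4
	loss := float64(trans-rec) / float64(trans) * 100.0
	fmt.Printf("percent_packet_loss = %.1f\n", loss) // 20.0
}
```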
@@ -4,53 +4,53 @@ import (
	"bytes"
	"database/sql"
	"fmt"
	"regexp"
	"strings"

	"github.com/influxdb/telegraf/plugins"

	_ "github.com/lib/pq"
	"github.com/lib/pq"
)

type Server struct {
type Postgresql struct {
	Address        string
	Databases      []string
	OrderedColumns []string
}

type Postgresql struct {
	Servers []*Server
	VerbatimAddress  bool
	sanitizedAddress string
}

var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}

var sampleConfig = `
# specify servers via an array of tables
[[plugins.postgresql.servers]]

# specify address via a url matching:
#   postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# or a simple string:
#   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#
# All connection parameters are optional. By default, the host is localhost
# and the user is the currently running user. For localhost, we default
# to sslmode=disable as well.
# All connection parameters are optional.
#
# Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying
# to grab metrics for.
#
address = "host=localhost user=postgres sslmode=disable"

address = "sslmode=disable"
# Starting in 0.3.0 the default behavior is to convert the above given address to the
# key value form and, for security, remove the password before using it to tag the
# collected data.
#
# If you are using the URL form and/or have existing tooling matching against a previous
# value, you might want to prevent this transformation / sanitization. Set the following
# to true to leave it as entered for the tag.

# verbatim_address = true

# A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered.

# databases = ["app_production", "blah_testing"]

# [[plugins.postgresql.servers]]
# address = "influx@remoteserver"
# databases = ["app_production", "testing"]
`

func (p *Postgresql) SampleConfig() string {
@@ -65,42 +65,27 @@ func (p *Postgresql) IgnoredColumns() map[string]bool {
	return ignoredColumns
}

var localhost = &Server{Address: "sslmode=disable"}
var localhost = "host=localhost sslmode=disable"

func (p *Postgresql) Gather(acc plugins.Accumulator) error {
	if len(p.Servers) == 0 {
		p.gatherServer(localhost, acc)
		return nil
	}

	for _, serv := range p.Servers {
		err := p.gatherServer(serv, acc)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error {
	var query string

	if serv.Address == "" || serv.Address == "localhost" {
		serv = localhost
	if p.Address == "" || p.Address == "localhost" {
		p.Address = localhost
	}

	db, err := sql.Open("postgres", serv.Address)
	db, err := sql.Open("postgres", p.Address)
	if err != nil {
		return err
	}

	defer db.Close()

	if len(serv.Databases) == 0 {
	if len(p.Databases) == 0 {
		query = `SELECT * FROM pg_stat_database`
	} else {
		query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(serv.Databases, "','"))
		query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`,
			strings.Join(p.Databases, "','"))
	}

	rows, err := db.Query(query)
@@ -111,13 +96,13 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error {
	defer rows.Close()

	// grab the column information from the result
	serv.OrderedColumns, err = rows.Columns()
	p.OrderedColumns, err = rows.Columns()
	if err != nil {
		return err
	}

	for rows.Next() {
		err = p.accRow(rows, acc, serv)
		err = p.accRow(rows, acc)
		if err != nil {
			return err
		}
@@ -130,20 +115,41 @@ type scanner interface {
	Scan(dest ...interface{}) error
}

func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) error {
var passwordKVMatcher, _ = regexp.Compile("password=\\S+ ?")

func (p *Postgresql) SanitizedAddress() (_ string, err error) {
	var canonicalizedAddress string

	if p.sanitizedAddress == "" {
		if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") {
			canonicalizedAddress, err = pq.ParseURL(p.Address)
			if err != nil {
				return p.sanitizedAddress, err
			}
		} else {
			canonicalizedAddress = p.Address
		}

		p.sanitizedAddress = passwordKVMatcher.ReplaceAllString(canonicalizedAddress, "")
	}

	return p.sanitizedAddress, err
}

func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator) error {
	var columnVars []interface{}
	var dbname bytes.Buffer

	// this is where we'll store the column name with its *interface{}
	columnMap := make(map[string]*interface{})

	for _, column := range serv.OrderedColumns {
	for _, column := range p.OrderedColumns {
		columnMap[column] = new(interface{})
	}

	// populate the array of interface{} with the pointers in the right order
	for i := 0; i < len(columnMap); i++ {
		columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]])
		columnVars = append(columnVars, columnMap[p.OrderedColumns[i]])
	}

	// deconstruct array of variables and send to Scan
@@ -159,14 +165,26 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server)
		dbname.WriteString(string(dbnameChars[i]))
	}

	tags := map[string]string{"server": serv.Address, "db": dbname.String()}
	var tagAddress string
	if p.VerbatimAddress {
		tagAddress = p.Address
	} else {
		tagAddress, err = p.SanitizedAddress()
		if err != nil {
			return err
		}
	}

	tags := map[string]string{"server": tagAddress, "db": dbname.String()}

	fields := make(map[string]interface{})
	for col, val := range columnMap {
		_, ignore := ignoredColumns[col]
		if !ignore {
			acc.Add(col, *val, tags)
			fields[col] = *val
		}
	}
	acc.AddFields("postgresql", fields, tags)

	return nil
}

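The sanitization introduced above scrubs any `password=...` pair from the key/value connection string before it is used as the `server` tag; `pq.ParseURL` first canonicalizes `postgres://` URLs into that key/value form. A standalone sketch of just the scrubbing step (the driver call is omitted so the snippet runs on its own; the address is an illustrative example):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as passwordKVMatcher above: a password key/value pair plus
// the trailing space that separated it from the next pair.
var passwordKVMatcher = regexp.MustCompile(`password=\S+ ?`)

func main() {
	addr := "host=localhost user=postgres password=swordfish sslmode=disable"
	fmt.Println(passwordKVMatcher.ReplaceAllString(addr, ""))
	// Output: host=localhost user=postgres sslmode=disable
}
```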
@@ -15,13 +15,9 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
					testutil.GetLocalHost()),
				Databases: []string{"postgres"},
			},
		},
		Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
			testutil.GetLocalHost()),
		Databases: []string{"postgres"},
	}

	var acc testutil.Accumulator
@@ -30,7 +26,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
	require.NoError(t, err)

	availableColumns := make(map[string]bool)
	for _, col := range p.Servers[0].OrderedColumns {
	for _, col := range p.OrderedColumns {
		availableColumns[col] = true
	}

@@ -61,7 +57,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
	for _, metric := range intMetrics {
		_, ok := availableColumns[metric]
		if ok {
			assert.True(t, acc.HasIntValue(metric))
			assert.True(t, acc.HasIntField("postgresql", metric), metric)
			metricsCounted++
		}
	}
@@ -69,7 +65,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) {
	for _, metric := range floatMetrics {
		_, ok := availableColumns[metric]
		if ok {
			assert.True(t, acc.HasFloatValue(metric))
			assert.True(t, acc.HasFloatField("postgresql", metric), metric)
			metricsCounted++
		}
	}
@@ -84,13 +80,9 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) {
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
					testutil.GetLocalHost()),
				Databases: []string{"postgres"},
			},
		},
		Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
			testutil.GetLocalHost()),
		Databases: []string{"postgres"},
	}

	var acc testutil.Accumulator
@@ -98,24 +90,118 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) {
	err := p.Gather(&acc)
	require.NoError(t, err)

	point, ok := acc.Get("xact_commit")
	point, ok := acc.Get("postgresql")
	require.True(t, ok)

	assert.Equal(t, "postgres", point.Tags["db"])
}

func TestPostgresqlCanonicalizesAndSanitizesURLServerName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Address: fmt.Sprintf("postgres://postgres:swordfish@%s?sslmode=disable",
			testutil.GetLocalHost()),
		Databases: []string{"postgres"},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	point, ok := acc.Get("postgresql")
	require.True(t, ok)

	assert.Equal(t,
		fmt.Sprintf("host=%s sslmode=disable user=postgres", testutil.GetLocalHost()),
		point.Tags["server"])
}

func TestPostgresqlSanitizesKVServerName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Address: fmt.Sprintf("host=%s user=postgres password=swordfish sslmode=disable",
			testutil.GetLocalHost()),
		Databases: []string{"postgres"},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	point, ok := acc.Get("postgresql")
	require.True(t, ok)

	assert.Equal(t,
		fmt.Sprintf("host=%s user=postgres sslmode=disable", testutil.GetLocalHost()),
		point.Tags["server"])
}

func TestPostgresqlMaintainsVerbatimKVServerNameWhenRequested(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Address: fmt.Sprintf("host=%s user=postgres password=swordfish sslmode=disable",
			testutil.GetLocalHost()),
		VerbatimAddress: true,
		Databases:       []string{"postgres"},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	point, ok := acc.Get("postgresql")
	require.True(t, ok)

	assert.Equal(t,
		fmt.Sprintf("host=%s user=postgres password=swordfish sslmode=disable", testutil.GetLocalHost()),
		point.Tags["server"])
}

func TestPostgresqlMaintainsVerbatimURLServerNameWhenRequested(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Address: fmt.Sprintf("postgres://postgres:swordfish@%s?sslmode=disable",
			testutil.GetLocalHost()),
		VerbatimAddress: true,
		Databases:       []string{"postgres"},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	point, ok := acc.Get("postgresql")
	require.True(t, ok)

	assert.Equal(t,
		fmt.Sprintf("postgres://postgres:swordfish@%s?sslmode=disable", testutil.GetLocalHost()),
		point.Tags["server"])
}

func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
					testutil.GetLocalHost()),
			},
		},
		Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
			testutil.GetLocalHost()),
	}

	var acc testutil.Accumulator
@@ -126,7 +212,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {
	var found bool

	for _, pnt := range acc.Points {
		if pnt.Measurement == "xact_commit" {
		if pnt.Measurement == "postgresql" {
			if pnt.Tags["db"] == "postgres" {
				found = true
				break
@@ -143,12 +229,8 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) {
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
					testutil.GetLocalHost()),
			},
		},
		Address: fmt.Sprintf("host=%s user=postgres sslmode=disable",
			testutil.GetLocalHost()),
	}

	var acc testutil.Accumulator

@@ -7,22 +7,17 @@ import (
	"os/exec"
	"strconv"
	"strings"
	"sync"

	"github.com/shirou/gopsutil/process"

	"github.com/influxdb/telegraf/plugins"
)

type Specification struct {
type Procstat struct {
	PidFile string `toml:"pid_file"`
	Exe     string
	Prefix  string
	Pattern string
}

type Procstat struct {
	Specifications []*Specification
	Prefix string
}

func NewProcstat() *Procstat {
@@ -30,8 +25,6 @@ func NewProcstat() *Procstat {
}

var sampleConfig = `
[[plugins.procstat.specifications]]
prefix = "" # optional string to prefix measurements
# Must specify one of: pid_file, exe, or pattern
# PID file to monitor process
pid_file = "/var/run/nginx.pid"
@@ -39,6 +32,9 @@ var sampleConfig = `
# exe = "nginx"
# pattern as argument for pgrep (ie, pgrep -f <pattern>)
# pattern = "nginx"

# Field name prefix
prefix = ""
`

func (_ *Procstat) SampleConfig() string {
@@ -50,35 +46,26 @@ func (_ *Procstat) Description() string {
}

func (p *Procstat) Gather(acc plugins.Accumulator) error {
	var wg sync.WaitGroup

	for _, specification := range p.Specifications {
		wg.Add(1)
		go func(spec *Specification, acc plugins.Accumulator) {
			defer wg.Done()
			procs, err := spec.createProcesses()
			if err != nil {
				log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
					spec.Exe, spec.PidFile, spec.Pattern, err.Error())
			} else {
				for _, proc := range procs {
					p := NewSpecProcessor(spec.Prefix, acc, proc)
					p.pushMetrics()
				}
			}
		}(specification, acc)
	procs, err := p.createProcesses()
	if err != nil {
		log.Printf("Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] %s",
			p.Exe, p.PidFile, p.Pattern, err.Error())
	} else {
		for _, proc := range procs {
			p := NewSpecProcessor(p.Prefix, acc, proc)
			p.pushMetrics()
		}
	}
	wg.Wait()

	return nil
}

func (spec *Specification) createProcesses() ([]*process.Process, error) {
func (p *Procstat) createProcesses() ([]*process.Process, error) {
	var out []*process.Process
	var errstring string
	var outerr error

	pids, err := spec.getAllPids()
	pids, err := p.getAllPids()
	if err != nil {
		errstring += err.Error() + " "
	}
@@ -99,16 +86,16 @@ func (spec *Specification) createProcesses() ([]*process.Process, error) {
	return out, outerr
}

func (spec *Specification) getAllPids() ([]int32, error) {
func (p *Procstat) getAllPids() ([]int32, error) {
	var pids []int32
	var err error

	if spec.PidFile != "" {
		pids, err = pidsFromFile(spec.PidFile)
	} else if spec.Exe != "" {
		pids, err = pidsFromExe(spec.Exe)
	} else if spec.Pattern != "" {
		pids, err = pidsFromPattern(spec.Pattern)
	if p.PidFile != "" {
		pids, err = pidsFromFile(p.PidFile)
	} else if p.Exe != "" {
		pids, err = pidsFromExe(p.Exe)
	} else if p.Pattern != "" {
		pids, err = pidsFromPattern(p.Pattern)
	} else {
		err = fmt.Errorf("Either exe, pid_file or pattern has to be specified")
	}

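With the `Specification` wrapper gone, each `[[plugins.procstat]]` table is one process selector, and `getAllPids` picks exactly one of the three sources. A reduced, self-contained sketch of that selection (the `pidsFrom*` helpers are stubbed with illustrative PIDs so the snippet runs on its own):

```go
package main

import (
	"errors"
	"fmt"
)

type procstat struct{ PidFile, Exe, Pattern string }

// Mirrors the if/else chain above; the returned PIDs are stand-ins for the
// real pidsFromFile / pidsFromExe / pidsFromPattern helpers.
func (p *procstat) getAllPids() ([]int32, error) {
	switch {
	case p.PidFile != "":
		return []int32{1234}, nil // stub for pidsFromFile
	case p.Exe != "":
		return []int32{2345}, nil // stub for pidsFromExe
	case p.Pattern != "":
		return []int32{3456}, nil // stub for pidsFromPattern
	default:
		return nil, errors.New("Either exe, pid_file or pattern has to be specified")
	}
}

func main() {
	pids, err := (&procstat{Exe: "nginx"}).getAllPids()
	fmt.Println(pids, err) // [2345] <nil>
}
```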
@@ -12,6 +12,7 @@ import (
type SpecProcessor struct {
	Prefix string
	tags   map[string]string
	fields map[string]interface{}
	acc    plugins.Accumulator
	proc   *process.Process
}
@@ -23,7 +24,12 @@ func (p *SpecProcessor) add(metric string, value interface{}) {
	} else {
		mname = p.Prefix + "_" + metric
	}
	p.acc.Add(mname, value, p.tags)
	p.fields[mname] = value
}

func (p *SpecProcessor) flush() {
	p.acc.AddFields("procstat", p.fields, p.tags)
	p.fields = make(map[string]interface{})
}

func NewSpecProcessor(
@@ -39,6 +45,7 @@ func NewSpecProcessor(
	return &SpecProcessor{
		Prefix: prefix,
		tags:   tags,
		fields: make(map[string]interface{}),
		acc:    acc,
		proc:   p,
	}
@@ -60,6 +67,7 @@ func (p *SpecProcessor) pushMetrics() {
	if err := p.pushMemoryStats(); err != nil {
		log.Printf("procstat, mem stats not available: %s", err.Error())
	}
	p.flush()
}

func (p *SpecProcessor) pushFDStats() error {
@@ -94,21 +102,22 @@ func (p *SpecProcessor) pushIOStats() error {
	}

func (p *SpecProcessor) pushCPUStats() error {
	cpu, err := p.proc.CPUTimes()
	cpu_time, err := p.proc.CPUTimes()
	if err != nil {
		return err
	}
	p.add("cpu_user", cpu.User)
	p.add("cpu_system", cpu.System)
	p.add("cpu_idle", cpu.Idle)
	p.add("cpu_nice", cpu.Nice)
	p.add("cpu_iowait", cpu.Iowait)
	p.add("cpu_irq", cpu.Irq)
	p.add("cpu_soft_irq", cpu.Softirq)
	p.add("cpu_soft_steal", cpu.Steal)
	p.add("cpu_soft_stolen", cpu.Stolen)
	p.add("cpu_soft_guest", cpu.Guest)
	p.add("cpu_soft_guest_nice", cpu.GuestNice)
	p.add("cpu_time_user", cpu_time.User)
	p.add("cpu_time_system", cpu_time.System)
	p.add("cpu_time_idle", cpu_time.Idle)
	p.add("cpu_time_nice", cpu_time.Nice)
	p.add("cpu_time_iowait", cpu_time.Iowait)
	p.add("cpu_time_irq", cpu_time.Irq)
	p.add("cpu_time_soft_irq", cpu_time.Softirq)
	p.add("cpu_time_soft_steal", cpu_time.Steal)
	p.add("cpu_time_soft_stolen", cpu_time.Stolen)
	p.add("cpu_time_soft_guest", cpu_time.Guest)
	p.add("cpu_time_soft_guest_nice", cpu_time.GuestNice)

	return nil
}

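This is the change behind the release note that procstat CPU measurements are now prepended with `cpu_time_`: `add` buffers each value under its (optionally prefixed) field name, and `flush` emits one `procstat` measurement per process. A small self-contained sketch of that buffering, with printing standing in for the accumulator call and an illustrative prefix:

```go
package main

import "fmt"

type specProcessor struct {
	prefix string
	fields map[string]interface{}
}

// Same naming rule as SpecProcessor.add above: prefix the field name only
// when a prefix was configured.
func (p *specProcessor) add(metric string, value interface{}) {
	mname := metric
	if p.prefix != "" {
		mname = p.prefix + "_" + metric
	}
	p.fields[mname] = value
}

// flush stands in for acc.AddFields("procstat", ...) and resets the buffer.
func (p *specProcessor) flush() {
	fmt.Println("procstat", p.fields)
	p.fields = make(map[string]interface{})
}

func main() {
	p := &specProcessor{prefix: "nginx", fields: make(map[string]interface{})}
	p.add("cpu_time_user", 1.5)
	p.flush() // procstat map[nginx_cpu_time_user:1.5]
}
```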
@@ -80,14 +80,14 @@ func (g *Prometheus) gatherURL(url string, acc plugins.Accumulator) error {
		return fmt.Errorf("error getting processing samples for %s: %s", url, err)
	}
	for _, sample := range samples {
		tags := map[string]string{}
		tags := make(map[string]string)
		for key, value := range sample.Metric {
			if key == model.MetricNameLabel {
				continue
			}
			tags[string(key)] = string(value)
		}
		acc.Add(string(sample.Metric[model.MetricNameLabel]),
		acc.Add("prometheus_"+string(sample.Metric[model.MetricNameLabel]),
			float64(sample.Value), tags)
	}
	}

@@ -104,15 +104,16 @@ func (pa *PuppetAgent) Gather(acc plugins.Accumulator) error {
		return fmt.Errorf("%s", err)
	}

	structPrinter(&puppetState, acc)
	tags := map[string]string{"location": pa.Location}
	structPrinter(&puppetState, acc, tags)

	return nil
}

func structPrinter(s *State, acc plugins.Accumulator) {

func structPrinter(s *State, acc plugins.Accumulator, tags map[string]string) {
	e := reflect.ValueOf(s).Elem()

	fields := make(map[string]interface{})
	for tLevelFNum := 0; tLevelFNum < e.NumField(); tLevelFNum++ {
		name := e.Type().Field(tLevelFNum).Name
		nameNumField := e.FieldByName(name).NumField()
@@ -123,10 +124,10 @@ func structPrinter(s *State, acc plugins.Accumulator) {

			lname := strings.ToLower(name)
			lsName := strings.ToLower(sName)
			acc.Add(fmt.Sprintf("%s_%s", lname, lsName), sValue, nil)
			fields[fmt.Sprintf("%s_%s", lname, lsName)] = sValue
		}
	}

	acc.AddFields("puppetagent", fields, tags)
}

func init() {

@@ -5,6 +5,7 @@ import (
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/influxdb/telegraf/plugins"
)
@@ -13,17 +14,13 @@ const DefaultUsername = "guest"
const DefaultPassword = "guest"
const DefaultURL = "http://localhost:15672"

type Server struct {
type RabbitMQ struct {
	URL      string
	Name     string
	Username string
	Password string
	Nodes    []string
	Queues   []string
}

type RabbitMQ struct {
	Servers []*Server

	Client *http.Client
}
@@ -94,15 +91,13 @@ type Node struct {
	SocketsUsed int64 `json:"sockets_used"`
}

type gatherFunc func(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error)
type gatherFunc func(r *RabbitMQ, acc plugins.Accumulator, errChan chan error)

var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues}

var sampleConfig = `
# Specify servers via an array of tables
[[plugins.rabbitmq.servers]]
url = "http://localhost:15672" # required
# name = "rmq-server-1" # optional tag
# url = "http://localhost:15672"
# username = "guest"
# password = "guest"

@@ -119,27 +114,18 @@ func (r *RabbitMQ) Description() string {
	return "Read metrics from one or many RabbitMQ servers via the management API"
}

var localhost = &Server{URL: DefaultURL}

func (r *RabbitMQ) Gather(acc plugins.Accumulator) error {
	if r.Client == nil {
		r.Client = &http.Client{}
	}

	var errChan = make(chan error, len(r.Servers))
	var errChan = make(chan error, len(gatherFunctions))

	// use localhost if no servers are specified in config
	if len(r.Servers) == 0 {
		r.Servers = append(r.Servers, localhost)
	for _, f := range gatherFunctions {
		go f(r, acc, errChan)
	}

	for _, serv := range r.Servers {
		for _, f := range gatherFunctions {
			go f(r, serv, acc, errChan)
		}
	}

	for i := 1; i <= len(r.Servers)*len(gatherFunctions); i++ {
	for i := 1; i <= len(gatherFunctions); i++ {
		err := <-errChan
		if err != nil {
			return err
@@ -149,20 +135,20 @@ func (r *RabbitMQ) Gather(acc plugins.Accumulator) error {
	return nil
}

func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error {
	u = fmt.Sprintf("%s%s", serv.URL, u)
func (r *RabbitMQ) requestJSON(u string, target interface{}) error {
	u = fmt.Sprintf("%s%s", r.URL, u)

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return err
	}

	username := serv.Username
	username := r.Username
	if username == "" {
		username = DefaultUsername
	}

	password := serv.Password
	password := r.Password
	if password == "" {
		password = DefaultPassword
	}
@@ -181,10 +167,10 @@ func (r *RabbitMQ) requestJSON(serv *Server, u string, target interface{}) error
	return nil
}

func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
func gatherOverview(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
	overview := &OverviewResponse{}

	err := r.requestJSON(serv, "/api/overview", &overview)
	err := r.requestJSON("/api/overview", &overview)
	if err != nil {
		errChan <- err
		return
@@ -195,76 +181,80 @@ func gatherOverview(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan
		return
	}

	tags := map[string]string{"url": serv.URL}
	if serv.Name != "" {
		tags["name"] = serv.Name
	tags := map[string]string{"url": r.URL}
	if r.Name != "" {
		tags["name"] = r.Name
	}

	acc.Add("messages", overview.QueueTotals.Messages, tags)
	acc.Add("messages_ready", overview.QueueTotals.MessagesReady, tags)
	acc.Add("messages_unacked", overview.QueueTotals.MessagesUnacknowledged, tags)

	acc.Add("channels", overview.ObjectTotals.Channels, tags)
	acc.Add("connections", overview.ObjectTotals.Connections, tags)
	acc.Add("consumers", overview.ObjectTotals.Consumers, tags)
	acc.Add("exchanges", overview.ObjectTotals.Exchanges, tags)
	acc.Add("queues", overview.ObjectTotals.Queues, tags)

	acc.Add("messages_acked", overview.MessageStats.Ack, tags)
	acc.Add("messages_delivered", overview.MessageStats.Deliver, tags)
	acc.Add("messages_published", overview.MessageStats.Publish, tags)
	fields := map[string]interface{}{
		"messages":           overview.QueueTotals.Messages,
		"messages_ready":     overview.QueueTotals.MessagesReady,
		"messages_unacked":   overview.QueueTotals.MessagesUnacknowledged,
		"channels":           overview.ObjectTotals.Channels,
		"connections":        overview.ObjectTotals.Connections,
		"consumers":          overview.ObjectTotals.Consumers,
		"exchanges":          overview.ObjectTotals.Exchanges,
		"queues":             overview.ObjectTotals.Queues,
		"messages_acked":     overview.MessageStats.Ack,
		"messages_delivered": overview.MessageStats.Deliver,
		"messages_published": overview.MessageStats.Publish,
	}
	acc.AddFields("rabbitmq_overview", fields, tags)

	errChan <- nil
}

func gatherNodes(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
func gatherNodes(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
	nodes := make([]Node, 0)
	// Gather information about nodes
	err := r.requestJSON(serv, "/api/nodes", &nodes)
	err := r.requestJSON("/api/nodes", &nodes)
	if err != nil {
		errChan <- err
		return
	}
	now := time.Now()

	for _, node := range nodes {
		if !shouldGatherNode(node, serv) {
		if !r.shouldGatherNode(node) {
			continue
		}

		tags := map[string]string{"url": serv.URL}
		tags := map[string]string{"url": r.URL}
		tags["node"] = node.Name

		acc.Add("disk_free", node.DiskFree, tags)
		acc.Add("disk_free_limit", node.DiskFreeLimit, tags)
		acc.Add("fd_total", node.FdTotal, tags)
		acc.Add("fd_used", node.FdUsed, tags)
		acc.Add("mem_limit", node.MemLimit, tags)
		acc.Add("mem_used", node.MemUsed, tags)
		acc.Add("proc_total", node.ProcTotal, tags)
		acc.Add("proc_used", node.ProcUsed, tags)
		acc.Add("run_queue", node.RunQueue, tags)
		acc.Add("sockets_total", node.SocketsTotal, tags)
		acc.Add("sockets_used", node.SocketsUsed, tags)
		fields := map[string]interface{}{
			"disk_free":       node.DiskFree,
			"disk_free_limit": node.DiskFreeLimit,
			"fd_total":        node.FdTotal,
			"fd_used":         node.FdUsed,
			"mem_limit":       node.MemLimit,
			"mem_used":        node.MemUsed,
			"proc_total":      node.ProcTotal,
			"proc_used":       node.ProcUsed,
			"run_queue":       node.RunQueue,
			"sockets_total":   node.SocketsTotal,
			"sockets_used":    node.SocketsUsed,
		}
		acc.AddFields("rabbitmq_node", fields, tags, now)
	}

	errChan <- nil
}

func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan chan error) {
func gatherQueues(r *RabbitMQ, acc plugins.Accumulator, errChan chan error) {
	// Gather information about queues
	queues := make([]Queue, 0)
	err := r.requestJSON(serv, "/api/queues", &queues)
	err := r.requestJSON("/api/queues", &queues)
	if err != nil {
		errChan <- err
		return
	}

	for _, queue := range queues {
		if !shouldGatherQueue(queue, serv) {
		if !r.shouldGatherQueue(queue) {
			continue
		}
		tags := map[string]string{
			"url":   serv.URL,
			"url":   r.URL,
			"queue": queue.Name,
			"vhost": queue.Vhost,
			"node":  queue.Node,
@@ -273,7 +263,7 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch
		}

		acc.AddFields(
			"queue",
			"rabbitmq_queue",
			map[string]interface{}{
				// common information
				"consumers": queue.Consumers,
@@ -301,12 +291,12 @@ func gatherQueues(r *RabbitMQ, serv *Server, acc plugins.Accumulator, errChan ch
	errChan <- nil
}

func shouldGatherNode(node Node, serv *Server) bool {
	if len(serv.Nodes) == 0 {
func (r *RabbitMQ) shouldGatherNode(node Node) bool {
	if len(r.Nodes) == 0 {
		return true
	}

	for _, name := range serv.Nodes {
	for _, name := range r.Nodes {
		if name == node.Name {
			return true
		}
@@ -315,12 +305,12 @@ func shouldGatherNode(node Node, serv *Server) bool {
	return false
}

func shouldGatherQueue(queue Queue, serv *Server) bool {
	if len(serv.Queues) == 0 {
func (r *RabbitMQ) shouldGatherQueue(queue Queue) bool {
	if len(r.Queues) == 0 {
		return true
	}

	for _, name := range serv.Queues {
	for _, name := range r.Queues {
		if name == queue.Name {
			return true
		}

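With the per-server loop gone, the gather fan-out is simpler: one goroutine per gather function, each sending exactly one error (possibly nil) on the channel, and the receive loop bounded by `len(gatherFunctions)`. A self-contained sketch of that fan-out/fan-in shape, with trivial stand-ins for the three gather functions:

```go
package main

import "fmt"

func main() {
	// Stand-ins for gatherOverview, gatherNodes, gatherQueues.
	gatherFunctions := []func() error{
		func() error { return nil },
		func() error { return nil },
		func() error { return fmt.Errorf("queues unreachable") }, // illustrative failure
	}

	// Buffered so every goroutine can send even if we return early.
	errChan := make(chan error, len(gatherFunctions))
	for _, f := range gatherFunctions {
		go func(f func() error) { errChan <- f() }(f)
	}

	// Exactly one receive per launched goroutine.
	for i := 1; i <= len(gatherFunctions); i++ {
		if err := <-errChan; err != nil {
			fmt.Println("gather error:", err)
		}
	}
}
```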
@@ -164,6 +164,7 @@ func gatherInfoOutput(
	var keyspace_hits, keyspace_misses uint64 = 0, 0

	scanner := bufio.NewScanner(rdr)
	fields := make(map[string]interface{})
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "ERR") {
@@ -199,7 +200,7 @@ func gatherInfoOutput(
		}

		if err == nil {
			acc.Add(metric, ival, tags)
			fields[metric] = ival
			continue
		}

@@ -208,13 +209,14 @@ func gatherInfoOutput(
			return err
		}

		acc.Add(metric, fval, tags)
		fields[metric] = fval
	}
	var keyspace_hitrate float64 = 0.0
	if keyspace_hits != 0 || keyspace_misses != 0 {
		keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses)
	}
	acc.Add("keyspace_hitrate", keyspace_hitrate, tags)
	fields["keyspace_hitrate"] = keyspace_hitrate
	acc.AddFields("redis", fields, tags)
	return nil
}

@@ -229,15 +231,17 @@ func gatherKeyspaceLine(
	tags map[string]string,
) {
	if strings.Contains(line, "keys=") {
		fields := make(map[string]interface{})
		tags["database"] = name
		dbparts := strings.Split(line, ",")
		for _, dbp := range dbparts {
			kv := strings.Split(dbp, "=")
			ival, err := strconv.ParseUint(kv[1], 10, 64)
			if err == nil {
				acc.Add(kv[0], ival, tags)
				fields[kv[0]] = ival
			}
		}
		acc.AddFields("redis_keyspace", fields, tags)
	}
}

@@ -86,25 +86,30 @@ var engineStats = map[string]string{
	"total_writes": "TotalWrites",
}

func (e *Engine) AddEngineStats(keys []string, acc plugins.Accumulator, tags map[string]string) {
func (e *Engine) AddEngineStats(
	keys []string,
	acc plugins.Accumulator,
	tags map[string]string,
) {
	engine := reflect.ValueOf(e).Elem()
	fields := make(map[string]interface{})
	for _, key := range keys {
		acc.Add(
			key,
			engine.FieldByName(engineStats[key]).Interface(),
			tags,
		)
		fields[key] = engine.FieldByName(engineStats[key]).Interface()
	}
	acc.AddFields("rethinkdb_engine", fields, tags)
}

func (s *Storage) AddStats(acc plugins.Accumulator, tags map[string]string) {
	acc.Add("cache_bytes_in_use", s.Cache.BytesInUse, tags)
	acc.Add("disk_read_bytes_per_sec", s.Disk.ReadBytesPerSec, tags)
	acc.Add("disk_read_bytes_total", s.Disk.ReadBytesTotal, tags)
	acc.Add("disk_written_bytes_per_sec", s.Disk.WriteBytesPerSec, tags)
	acc.Add("disk_written_bytes_total", s.Disk.WriteBytesTotal, tags)
	acc.Add("disk_usage_data_bytes", s.Disk.SpaceUsage.Data, tags)
	acc.Add("disk_usage_garbage_bytes", s.Disk.SpaceUsage.Garbage, tags)
	acc.Add("disk_usage_metadata_bytes", s.Disk.SpaceUsage.Metadata, tags)
	acc.Add("disk_usage_preallocated_bytes", s.Disk.SpaceUsage.Prealloc, tags)
	fields := map[string]interface{}{
		"cache_bytes_in_use":            s.Cache.BytesInUse,
		"disk_read_bytes_per_sec":       s.Disk.ReadBytesPerSec,
		"disk_read_bytes_total":         s.Disk.ReadBytesTotal,
		"disk_written_bytes_per_sec":    s.Disk.WriteBytesPerSec,
		"disk_written_bytes_total":      s.Disk.WriteBytesTotal,
		"disk_usage_data_bytes":         s.Disk.SpaceUsage.Data,
		"disk_usage_garbage_bytes":      s.Disk.SpaceUsage.Garbage,
		"disk_usage_metadata_bytes":     s.Disk.SpaceUsage.Metadata,
		"disk_usage_preallocated_bytes": s.Disk.SpaceUsage.Prealloc,
	}
	acc.AddFields("rethinkdb", fields, tags)
}

@@ -2,6 +2,7 @@ package system

import (
	"fmt"
	"time"

	"github.com/influxdb/telegraf/plugins"
	"github.com/shirou/gopsutil/cpu"
@@ -31,7 +32,7 @@ var sampleConfig = `
# Whether to report total system cpu stats or not
totalcpu = true
# Comment this line if you want the raw CPU time metrics
drop = ["cpu_time*"]
drop = ["time_*"]
`

func (_ *CPUStats) SampleConfig() string {
@@ -43,6 +44,7 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {
	if err != nil {
		return fmt.Errorf("error getting CPU info: %s", err)
	}
	now := time.Now()

	for i, cts := range times {
		tags := map[string]string{
@@ -51,21 +53,24 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {

		total := totalCpuTime(cts)

		// Add total cpu numbers
		add(acc, "time_user", cts.User, tags)
		add(acc, "time_system", cts.System, tags)
		add(acc, "time_idle", cts.Idle, tags)
		add(acc, "time_nice", cts.Nice, tags)
		add(acc, "time_iowait", cts.Iowait, tags)
		add(acc, "time_irq", cts.Irq, tags)
		add(acc, "time_softirq", cts.Softirq, tags)
		add(acc, "time_steal", cts.Steal, tags)
		add(acc, "time_guest", cts.Guest, tags)
		add(acc, "time_guest_nice", cts.GuestNice, tags)
		// Add cpu time metrics
		fields := map[string]interface{}{
			"time_user":       cts.User,
			"time_system":     cts.System,
			"time_idle":       cts.Idle,
			"time_nice":       cts.Nice,
			"time_iowait":     cts.Iowait,
			"time_irq":        cts.Irq,
			"time_softirq":    cts.Softirq,
			"time_steal":      cts.Steal,
			"time_guest":      cts.Guest,
			"time_guest_nice": cts.GuestNice,
		}

		// Add in percentage
		if len(s.lastStats) == 0 {
			// If it's the 1st gather, can't get CPU stats yet
			acc.AddFields("cpu", fields, tags, now)
			// If it's the 1st gather, can't get CPU Usage stats yet
			continue
		}
		lastCts := s.lastStats[i]
@@ -81,17 +86,17 @@ func (s *CPUStats) Gather(acc plugins.Accumulator) error {
			continue
		}

		add(acc, "usage_user", 100*(cts.User-lastCts.User)/totalDelta, tags)
		add(acc, "usage_system", 100*(cts.System-lastCts.System)/totalDelta, tags)
		add(acc, "usage_idle", 100*(cts.Idle-lastCts.Idle)/totalDelta, tags)
		add(acc, "usage_nice", 100*(cts.Nice-lastCts.Nice)/totalDelta, tags)
		add(acc, "usage_iowait", 100*(cts.Iowait-lastCts.Iowait)/totalDelta, tags)
		add(acc, "usage_irq", 100*(cts.Irq-lastCts.Irq)/totalDelta, tags)
		add(acc, "usage_softirq", 100*(cts.Softirq-lastCts.Softirq)/totalDelta, tags)
		add(acc, "usage_steal", 100*(cts.Steal-lastCts.Steal)/totalDelta, tags)
		add(acc, "usage_guest", 100*(cts.Guest-lastCts.Guest)/totalDelta, tags)
		add(acc, "usage_guest_nice", 100*(cts.GuestNice-lastCts.GuestNice)/totalDelta, tags)

		fields["usage_user"] = 100 * (cts.User - lastCts.User) / totalDelta
		fields["usage_system"] = 100 * (cts.System - lastCts.System) / totalDelta
		fields["usage_idle"] = 100 * (cts.Idle - lastCts.Idle) / totalDelta
		fields["usage_nice"] = 100 * (cts.Nice - lastCts.Nice) / totalDelta
		fields["usage_iowait"] = 100 * (cts.Iowait - lastCts.Iowait) / totalDelta
		fields["usage_irq"] = 100 * (cts.Irq - lastCts.Irq) / totalDelta
		fields["usage_softirq"] = 100 * (cts.Softirq - lastCts.Softirq) / totalDelta
		fields["usage_steal"] = 100 * (cts.Steal - lastCts.Steal) / totalDelta
		fields["usage_guest"] = 100 * (cts.Guest - lastCts.Guest) / totalDelta
		fields["usage_guest_nice"] = 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta
		acc.AddFields("cpu", fields, tags, now)
	}

	s.lastStats = times

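Each `usage_*` field is the delta of one CPU-time counter between two gathers, expressed as a percentage of the total busy-time delta. A worked example using the `time_user` values from the test that follows (3.1 rising to 11.4 while the total rises by exactly 100):

```go
package main

import "fmt"

func main() {
	lastUser, user := 3.1, 11.4 // time_user at the previous and current gather
	totalDelta := 100.0         // total CPU time elapsed between gathers
	usageUser := 100 * (user - lastUser) / totalDelta
	fmt.Printf("usage_user = %.1f\n", usageUser) // 8.3
}
```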
@@ -0,0 +1,148 @@
package system

import (
	"fmt"
	"testing"

	"github.com/influxdb/telegraf/testutil"
	"github.com/shirou/gopsutil/cpu"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCPUStats(t *testing.T) {
	var mps MockPS
	defer mps.AssertExpectations(t)
	var acc testutil.Accumulator

	cts := cpu.CPUTimesStat{
		CPU:       "cpu0",
		User:      3.1,
		System:    8.2,
		Idle:      80.1,
		Nice:      1.3,
		Iowait:    0.2,
		Irq:       0.1,
		Softirq:   0.11,
		Steal:     0.0511,
		Guest:     8.1,
		GuestNice: 0.324,
	}

	cts2 := cpu.CPUTimesStat{
		CPU:       "cpu0",
		User:      11.4,     // increased by 8.3
		System:    10.9,     // increased by 2.7
		Idle:      158.8699, // increased by 78.7699 (for total increase of 100)
		Nice:      2.5,      // increased by 1.2
		Iowait:    0.7,      // increased by 0.5
		Irq:       1.2,      // increased by 1.1
		Softirq:   0.31,     // increased by 0.2
		Steal:     0.2812,   // increased by 0.2301
		Guest:     12.9,     // increased by 4.8
		GuestNice: 2.524,    // increased by 2.2
	}

	mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)

	cs := NewCPUStats(&mps)

	cputags := map[string]string{
		"cpu": "cpu0",
	}

	err := cs.Gather(&acc)
	require.NoError(t, err)

	// Computed values are checked with delta > 0 because of floating point
	// arithmetic imprecision
	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 3.1, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.2, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.1, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 8.1, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags)

	mps2 := MockPS{}
	mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
	cs.ps = &mps2

	// Should have added cpu percentages too
	err = cs.Gather(&acc)
	require.NoError(t, err)

	assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 11.4, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 158.8699, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 2.5, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.7, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 12.9, 0, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags)

	assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 8.3, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 78.7699, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 1.2, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.5, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 1.1, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 4.8, 0.0005, cputags)
	assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags)
}

// Asserts that a given accumulator contains a measurement of type float64 with
// specific tags within a certain distance of a given expected value. Asserts a failure
// if the measurement is of the wrong type, or if no matching measurements are found
//
// Parameters:
//     t *testing.T            : Testing object to use
//     acc testutil.Accumulator: Accumulator to examine
//     measurement string      : Name of the measurement to examine
//     field string            : Name of the field to examine
//     expectedValue float64   : Value to search for within the measurement
//     delta float64           : Maximum acceptable distance of an accumulated value
//                               from the expectedValue parameter. Useful when
//                               floating-point arithmetic imprecision makes looking
//                               for an exact match impractical
//     tags map[string]string  : Tag set the found measurement must have. Set to nil to
//                               ignore the tag set.
func assertContainsTaggedFloat(
	t *testing.T,
	acc *testutil.Accumulator,
	measurement string,
	field string,
	expectedValue float64,
	delta float64,
	tags map[string]string,
) {
	var actualValue float64
	for _, pt := range acc.Points {
		if pt.Measurement == measurement {
			for fieldname, value := range pt.Fields {
				if fieldname == field {
					if value, ok := value.(float64); ok {
						actualValue = value
						if (value >= expectedValue-delta) && (value <= expectedValue+delta) {
							// Found the point, return without failing
							return
						}
					} else {
						assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64",
							measurement))
					}
				}
			}
		}
	}
	msg := fmt.Sprintf(
		"Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f",
		measurement, delta, expectedValue, actualValue)
	assert.Fail(t, msg)
}

@@ -50,12 +50,15 @@ func (s *DiskStats) Gather(acc plugins.Accumulator) error {
			"path":   du.Path,
			"fstype": du.Fstype,
		}
		acc.Add("total", du.Total, tags)
		acc.Add("free", du.Free, tags)
		acc.Add("used", du.Total-du.Free, tags)
		acc.Add("inodes_total", du.InodesTotal, tags)
		acc.Add("inodes_free", du.InodesFree, tags)
		acc.Add("inodes_used", du.InodesTotal-du.InodesFree, tags)
		fields := map[string]interface{}{
			"total":        du.Total,
			"free":         du.Free,
			"used":         du.Total - du.Free,
			"inodes_total": du.InodesTotal,
			"inodes_free":  du.InodesFree,
			"inodes_used":  du.InodesTotal - du.InodesFree,
		}
		acc.AddFields("disk", fields, tags)
	}

	return nil
@@ -115,13 +118,16 @@ func (s *DiskIOStats) Gather(acc plugins.Accumulator) error {
			}
		}

		acc.Add("reads", io.ReadCount, tags)
		acc.Add("writes", io.WriteCount, tags)
		acc.Add("read_bytes", io.ReadBytes, tags)
		acc.Add("write_bytes", io.WriteBytes, tags)
		acc.Add("read_time", io.ReadTime, tags)
		acc.Add("write_time", io.WriteTime, tags)
		acc.Add("io_time", io.IoTime, tags)
		fields := map[string]interface{}{
			"reads":       io.ReadCount,
			"writes":      io.WriteCount,
			"read_bytes":  io.ReadBytes,
			"write_bytes": io.WriteBytes,
			"read_time":   io.ReadTime,
			"write_time":  io.WriteTime,
			"io_time":     io.IoTime,
		}
		acc.AddFields("diskio", fields, tags)
	}

	return nil
@@ -132,7 +138,7 @@ func init() {
		return &DiskStats{ps: &systemPS{}}
	})

	plugins.Add("io", func() plugins.Plugin {
	plugins.Add("diskio", func() plugins.Plugin {
		return &DiskIOStats{ps: &systemPS{}}
	})
}

@@ -0,0 +1,165 @@
package system

import (
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/shirou/gopsutil/disk"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestDiskStats(t *testing.T) {
    var mps MockPS
    defer mps.AssertExpectations(t)
    var acc testutil.Accumulator
    var err error

    du := []*disk.DiskUsageStat{
        {
            Path: "/",
            Fstype: "ext4",
            Total: 128,
            Free: 23,
            InodesTotal: 1234,
            InodesFree: 234,
        },
        {
            Path: "/home",
            Fstype: "ext4",
            Total: 256,
            Free: 46,
            InodesTotal: 2468,
            InodesFree: 468,
        },
    }

    mps.On("DiskUsage").Return(du, nil)

    err = (&DiskStats{ps: &mps}).Gather(&acc)
    require.NoError(t, err)

    numDiskPoints := acc.NFields()
    expectedAllDiskPoints := 12
    assert.Equal(t, expectedAllDiskPoints, numDiskPoints)

    tags1 := map[string]string{
        "path": "/",
        "fstype": "ext4",
    }
    tags2 := map[string]string{
        "path": "/home",
        "fstype": "ext4",
    }

    fields1 := map[string]interface{}{
        "total": uint64(128),
        "used": uint64(105),
        "free": uint64(23),
        "inodes_total": uint64(1234),
        "inodes_free": uint64(234),
        "inodes_used": uint64(1000),
    }
    fields2 := map[string]interface{}{
        "total": uint64(256),
        "used": uint64(210),
        "free": uint64(46),
        "inodes_total": uint64(2468),
        "inodes_free": uint64(468),
        "inodes_used": uint64(2000),
    }
    acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
    acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)

    // We expect 6 more DiskPoints to show up with an explicit match on "/"
    // and /home not matching the /dev in Mountpoints
    err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
    assert.Equal(t, expectedAllDiskPoints+6, acc.NFields())

    // We should see all the diskpoints as Mountpoints includes both
    // / and /home
    err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
    assert.Equal(t, 2*expectedAllDiskPoints+6, acc.NFields())
}

// func TestDiskIOStats(t *testing.T) {
// var mps MockPS
// defer mps.AssertExpectations(t)
// var acc testutil.Accumulator
// var err error

// diskio1 := disk.DiskIOCountersStat{
// ReadCount: 888,
// WriteCount: 5341,
// ReadBytes: 100000,
// WriteBytes: 200000,
// ReadTime: 7123,
// WriteTime: 9087,
// Name: "sda1",
// IoTime: 123552,
// SerialNumber: "ab-123-ad",
// }
// diskio2 := disk.DiskIOCountersStat{
// ReadCount: 444,
// WriteCount: 2341,
// ReadBytes: 200000,
// WriteBytes: 400000,
// ReadTime: 3123,
// WriteTime: 6087,
// Name: "sdb1",
// IoTime: 246552,
// SerialNumber: "bb-123-ad",
// }

// mps.On("DiskIO").Return(
// map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2},
// nil)

// err = (&DiskIOStats{ps: &mps}).Gather(&acc)
// require.NoError(t, err)

// numDiskIOPoints := acc.NFields()
// expectedAllDiskIOPoints := 14
// assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints)

// dtags1 := map[string]string{
// "name": "sda1",
// "serial": "ab-123-ad",
// }
// dtags2 := map[string]string{
// "name": "sdb1",
// "serial": "bb-123-ad",
// }

// assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1))
// assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1))
// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1))
// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1))
// assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1))
// assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1))
// assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1))
// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2))
// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2))
// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2))
// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2))
// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2))
// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))

// // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1"
// // and serial should be missing from the tags with SkipSerialNumber set
// err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
// assert.Equal(t, expectedAllDiskIOPoints+7, acc.NFields())

// dtags3 := map[string]string{
// "name": "sdb1",
// }

// assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3))
// assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3))
// assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3))
// assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3))
// assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3))
// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3))
// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3))
// }

@@ -36,44 +36,47 @@ func (s *DockerStats) Gather(acc plugins.Accumulator) error {

        cts := cont.CPU

        acc.Add("user", cts.User, tags)
        acc.Add("system", cts.System, tags)
        acc.Add("idle", cts.Idle, tags)
        acc.Add("nice", cts.Nice, tags)
        acc.Add("iowait", cts.Iowait, tags)
        acc.Add("irq", cts.Irq, tags)
        acc.Add("softirq", cts.Softirq, tags)
        acc.Add("steal", cts.Steal, tags)
        acc.Add("guest", cts.Guest, tags)
        acc.Add("guest_nice", cts.GuestNice, tags)
        fields := map[string]interface{}{
            "user": cts.User,
            "system": cts.System,
            "idle": cts.Idle,
            "nice": cts.Nice,
            "iowait": cts.Iowait,
            "irq": cts.Irq,
            "softirq": cts.Softirq,
            "steal": cts.Steal,
            "guest": cts.Guest,
            "guest_nice": cts.GuestNice,

        acc.Add("cache", cont.Mem.Cache, tags)
        acc.Add("rss", cont.Mem.RSS, tags)
        acc.Add("rss_huge", cont.Mem.RSSHuge, tags)
        acc.Add("mapped_file", cont.Mem.MappedFile, tags)
        acc.Add("swap_in", cont.Mem.Pgpgin, tags)
        acc.Add("swap_out", cont.Mem.Pgpgout, tags)
        acc.Add("page_fault", cont.Mem.Pgfault, tags)
        acc.Add("page_major_fault", cont.Mem.Pgmajfault, tags)
        acc.Add("inactive_anon", cont.Mem.InactiveAnon, tags)
        acc.Add("active_anon", cont.Mem.ActiveAnon, tags)
        acc.Add("inactive_file", cont.Mem.InactiveFile, tags)
        acc.Add("active_file", cont.Mem.ActiveFile, tags)
        acc.Add("unevictable", cont.Mem.Unevictable, tags)
        acc.Add("memory_limit", cont.Mem.HierarchicalMemoryLimit, tags)
        acc.Add("total_cache", cont.Mem.TotalCache, tags)
        acc.Add("total_rss", cont.Mem.TotalRSS, tags)
        acc.Add("total_rss_huge", cont.Mem.TotalRSSHuge, tags)
        acc.Add("total_mapped_file", cont.Mem.TotalMappedFile, tags)
        acc.Add("total_swap_in", cont.Mem.TotalPgpgIn, tags)
        acc.Add("total_swap_out", cont.Mem.TotalPgpgOut, tags)
        acc.Add("total_page_fault", cont.Mem.TotalPgFault, tags)
        acc.Add("total_page_major_fault", cont.Mem.TotalPgMajFault, tags)
        acc.Add("total_inactive_anon", cont.Mem.TotalInactiveAnon, tags)
        acc.Add("total_active_anon", cont.Mem.TotalActiveAnon, tags)
        acc.Add("total_inactive_file", cont.Mem.TotalInactiveFile, tags)
        acc.Add("total_active_file", cont.Mem.TotalActiveFile, tags)
        acc.Add("total_unevictable", cont.Mem.TotalUnevictable, tags)
            "cache": cont.Mem.Cache,
            "rss": cont.Mem.RSS,
            "rss_huge": cont.Mem.RSSHuge,
            "mapped_file": cont.Mem.MappedFile,
            "swap_in": cont.Mem.Pgpgin,
            "swap_out": cont.Mem.Pgpgout,
            "page_fault": cont.Mem.Pgfault,
            "page_major_fault": cont.Mem.Pgmajfault,
            "inactive_anon": cont.Mem.InactiveAnon,
            "active_anon": cont.Mem.ActiveAnon,
            "inactive_file": cont.Mem.InactiveFile,
            "active_file": cont.Mem.ActiveFile,
            "unevictable": cont.Mem.Unevictable,
            "memory_limit": cont.Mem.HierarchicalMemoryLimit,
            "total_cache": cont.Mem.TotalCache,
            "total_rss": cont.Mem.TotalRSS,
            "total_rss_huge": cont.Mem.TotalRSSHuge,
            "total_mapped_file": cont.Mem.TotalMappedFile,
            "total_swap_in": cont.Mem.TotalPgpgIn,
            "total_swap_out": cont.Mem.TotalPgpgOut,
            "total_page_fault": cont.Mem.TotalPgFault,
            "total_page_major_fault": cont.Mem.TotalPgMajFault,
            "total_inactive_anon": cont.Mem.TotalInactiveAnon,
            "total_active_anon": cont.Mem.TotalActiveAnon,
            "total_inactive_file": cont.Mem.TotalInactiveFile,
            "total_active_file": cont.Mem.TotalActiveFile,
            "total_unevictable": cont.Mem.TotalUnevictable,
        }
        acc.AddFields("docker", fields, tags)
    }

    return nil

@@ -75,42 +75,46 @@ func TestDockerStats_GenerateStats(t *testing.T) {
        "command": "",
    }

    assert.True(t, acc.CheckTaggedValue("user", 3.1, dockertags))
    assert.True(t, acc.CheckTaggedValue("system", 8.2, dockertags))
    assert.True(t, acc.CheckTaggedValue("idle", 80.1, dockertags))
    assert.True(t, acc.CheckTaggedValue("nice", 1.3, dockertags))
    assert.True(t, acc.CheckTaggedValue("iowait", 0.2, dockertags))
    assert.True(t, acc.CheckTaggedValue("irq", 0.1, dockertags))
    assert.True(t, acc.CheckTaggedValue("softirq", 0.11, dockertags))
    assert.True(t, acc.CheckTaggedValue("steal", 0.0001, dockertags))
    assert.True(t, acc.CheckTaggedValue("guest", 8.1, dockertags))
    assert.True(t, acc.CheckTaggedValue("guest_nice", 0.324, dockertags))
    fields := map[string]interface{}{
        "user": 3.1,
        "system": 8.2,
        "idle": 80.1,
        "nice": 1.3,
        "iowait": 0.2,
        "irq": 0.1,
        "softirq": 0.11,
        "steal": 0.0001,
        "guest": 8.1,
        "guest_nice": 0.324,

    assert.True(t, acc.CheckTaggedValue("cache", uint64(1), dockertags))
    assert.True(t, acc.CheckTaggedValue("rss", uint64(2), dockertags))
    assert.True(t, acc.CheckTaggedValue("rss_huge", uint64(3), dockertags))
    assert.True(t, acc.CheckTaggedValue("mapped_file", uint64(4), dockertags))
    assert.True(t, acc.CheckTaggedValue("swap_in", uint64(5), dockertags))
    assert.True(t, acc.CheckTaggedValue("swap_out", uint64(6), dockertags))
    assert.True(t, acc.CheckTaggedValue("page_fault", uint64(7), dockertags))
    assert.True(t, acc.CheckTaggedValue("page_major_fault", uint64(8), dockertags))
    assert.True(t, acc.CheckTaggedValue("inactive_anon", uint64(9), dockertags))
    assert.True(t, acc.CheckTaggedValue("active_anon", uint64(10), dockertags))
    assert.True(t, acc.CheckTaggedValue("inactive_file", uint64(11), dockertags))
    assert.True(t, acc.CheckTaggedValue("active_file", uint64(12), dockertags))
    assert.True(t, acc.CheckTaggedValue("unevictable", uint64(13), dockertags))
    assert.True(t, acc.CheckTaggedValue("memory_limit", uint64(14), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_cache", uint64(15), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_rss", uint64(16), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_rss_huge", uint64(17), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_mapped_file", uint64(18), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_swap_in", uint64(19), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_swap_out", uint64(20), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_page_fault", uint64(21), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_page_major_fault", uint64(22), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_inactive_anon", uint64(23), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_active_anon", uint64(24), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_inactive_file", uint64(25), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_active_file", uint64(26), dockertags))
    assert.True(t, acc.CheckTaggedValue("total_unevictable", uint64(27), dockertags))
        "cache": uint64(1),
        "rss": uint64(2),
        "rss_huge": uint64(3),
        "mapped_file": uint64(4),
        "swap_in": uint64(5),
        "swap_out": uint64(6),
        "page_fault": uint64(7),
        "page_major_fault": uint64(8),
        "inactive_anon": uint64(9),
        "active_anon": uint64(10),
        "inactive_file": uint64(11),
        "active_file": uint64(12),
        "unevictable": uint64(13),
        "memory_limit": uint64(14),
        "total_cache": uint64(15),
        "total_rss": uint64(16),
        "total_rss_huge": uint64(17),
        "total_mapped_file": uint64(18),
        "total_swap_in": uint64(19),
        "total_swap_out": uint64(20),
        "total_page_fault": uint64(21),
        "total_page_major_fault": uint64(22),
        "total_inactive_anon": uint64(23),
        "total_active_anon": uint64(24),
        "total_inactive_file": uint64(25),
        "total_active_file": uint64(26),
        "total_unevictable": uint64(27),
    }

    acc.AssertContainsTaggedFields(t, "docker", fields, dockertags)
}

@@ -22,18 +22,17 @@ func (s *MemStats) Gather(acc plugins.Accumulator) error {
        return fmt.Errorf("error getting virtual memory info: %s", err)
    }

    vmtags := map[string]string(nil)

    acc.Add("total", vm.Total, vmtags)
    acc.Add("available", vm.Available, vmtags)
    acc.Add("used", vm.Used, vmtags)
    acc.Add("free", vm.Free, vmtags)
    acc.Add("cached", vm.Cached, vmtags)
    acc.Add("buffered", vm.Buffers, vmtags)
    acc.Add("used_percent", 100*float64(vm.Used)/float64(vm.Total), vmtags)
    acc.Add("available_percent",
        100*float64(vm.Available)/float64(vm.Total),
        vmtags)
    fields := map[string]interface{}{
        "total": vm.Total,
        "available": vm.Available,
        "used": vm.Used,
        "free": vm.Free,
        "cached": vm.Cached,
        "buffered": vm.Buffers,
        "used_percent": 100 * float64(vm.Used) / float64(vm.Total),
        "available_percent": 100 * float64(vm.Available) / float64(vm.Total),
    }
    acc.AddFields("mem", fields, nil)

    return nil
}

@@ -54,14 +53,15 @@ func (s *SwapStats) Gather(acc plugins.Accumulator) error {
        return fmt.Errorf("error getting swap memory info: %s", err)
    }

    swaptags := map[string]string(nil)

    acc.Add("total", swap.Total, swaptags)
    acc.Add("used", swap.Used, swaptags)
    acc.Add("free", swap.Free, swaptags)
    acc.Add("used_percent", swap.UsedPercent, swaptags)
    acc.Add("in", swap.Sin, swaptags)
    acc.Add("out", swap.Sout, swaptags)
    fields := map[string]interface{}{
        "total": swap.Total,
        "used": swap.Used,
        "free": swap.Free,
        "used_percent": swap.UsedPercent,
        "in": swap.Sin,
        "out": swap.Sout,
    }
    acc.AddFields("swap", fields, nil)

    return nil
}

@@ -0,0 +1,72 @@
package system

import (
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/shirou/gopsutil/mem"
    "github.com/stretchr/testify/require"
)

func TestMemStats(t *testing.T) {
    var mps MockPS
    var err error
    defer mps.AssertExpectations(t)
    var acc testutil.Accumulator

    vms := &mem.VirtualMemoryStat{
        Total: 12400,
        Available: 7600,
        Used: 5000,
        Free: 1235,
        // Active: 8134,
        // Inactive: 1124,
        // Buffers: 771,
        // Cached: 4312,
        // Wired: 134,
        // Shared: 2142,
    }

    mps.On("VMStat").Return(vms, nil)

    sms := &mem.SwapMemoryStat{
        Total: 8123,
        Used: 1232,
        Free: 6412,
        UsedPercent: 12.2,
        Sin: 7,
        Sout: 830,
    }

    mps.On("SwapStat").Return(sms, nil)

    err = (&MemStats{&mps}).Gather(&acc)
    require.NoError(t, err)

    memfields := map[string]interface{}{
        "total": uint64(12400),
        "available": uint64(7600),
        "used": uint64(5000),
        "available_percent": float64(7600) / float64(12400) * 100,
        "used_percent": float64(5000) / float64(12400) * 100,
        "free": uint64(1235),
        "cached": uint64(0),
        "buffered": uint64(0),
    }
    acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))

    acc.Points = nil

    err = (&SwapStats{&mps}).Gather(&acc)
    require.NoError(t, err)

    swapfields := map[string]interface{}{
        "total": uint64(8123),
        "used": uint64(1232),
        "used_percent": float64(12.2),
        "free": uint64(6412),
        "in": uint64(7),
        "out": uint64(830),
    }
    acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
}

@@ -70,26 +70,31 @@ func (s *NetIOStats) Gather(acc plugins.Accumulator) error {
            "interface": io.Name,
        }

        acc.Add("bytes_sent", io.BytesSent, tags)
        acc.Add("bytes_recv", io.BytesRecv, tags)
        acc.Add("packets_sent", io.PacketsSent, tags)
        acc.Add("packets_recv", io.PacketsRecv, tags)
        acc.Add("err_in", io.Errin, tags)
        acc.Add("err_out", io.Errout, tags)
        acc.Add("drop_in", io.Dropin, tags)
        acc.Add("drop_out", io.Dropout, tags)
        fields := map[string]interface{}{
            "bytes_sent": io.BytesSent,
            "bytes_recv": io.BytesRecv,
            "packets_sent": io.PacketsSent,
            "packets_recv": io.PacketsRecv,
            "err_in": io.Errin,
            "err_out": io.Errout,
            "drop_in": io.Dropin,
            "drop_out": io.Dropout,
        }
        acc.AddFields("net", fields, tags)
    }

    // Get system wide stats for different network protocols
    // (ignore these stats if the call fails)
    netprotos, _ := s.ps.NetProto()
    fields := make(map[string]interface{})
    for _, proto := range netprotos {
        for stat, value := range proto.Stats {
            name := fmt.Sprintf("%s_%s", strings.ToLower(proto.Protocol),
                strings.ToLower(stat))
            acc.Add(name, value, nil)
            fields[name] = value
        }
    }
    acc.AddFields("net", fields, nil)

    return nil
}
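The protocol loop above builds field names by lowercasing and joining the protocol and stat names, so gopsutil's ("Udp", "InDatagrams") pair becomes `udp_indatagrams` — exactly what the new net test below asserts. A standalone sketch of that naming rule:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Mirrors the name construction in NetIOStats.Gather above.
    name := fmt.Sprintf("%s_%s",
        strings.ToLower("Udp"), strings.ToLower("InDatagrams"))
    fmt.Println(name) // udp_indatagrams
}
```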
@@ -0,0 +1,106 @@
package system

import (
    "syscall"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/shirou/gopsutil/net"
    "github.com/stretchr/testify/require"
)

func TestNetStats(t *testing.T) {
    var mps MockPS
    var err error
    defer mps.AssertExpectations(t)
    var acc testutil.Accumulator

    netio := net.NetIOCountersStat{
        Name: "eth0",
        BytesSent: 1123,
        BytesRecv: 8734422,
        PacketsSent: 781,
        PacketsRecv: 23456,
        Errin: 832,
        Errout: 8,
        Dropin: 7,
        Dropout: 1,
    }

    mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil)

    netprotos := []net.NetProtoCountersStat{
        net.NetProtoCountersStat{
            Protocol: "Udp",
            Stats: map[string]int64{
                "InDatagrams": 4655,
                "NoPorts": 892592,
            },
        },
    }
    mps.On("NetProto").Return(netprotos, nil)

    netstats := []net.NetConnectionStat{
        net.NetConnectionStat{
            Type: syscall.SOCK_DGRAM,
        },
        net.NetConnectionStat{
            Status: "ESTABLISHED",
        },
        net.NetConnectionStat{
            Status: "ESTABLISHED",
        },
        net.NetConnectionStat{
            Status: "CLOSE",
        },
    }

    mps.On("NetConnections").Return(netstats, nil)

    err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
    require.NoError(t, err)

    ntags := map[string]string{
        "interface": "eth0",
    }

    fields1 := map[string]interface{}{
        "bytes_sent": uint64(1123),
        "bytes_recv": uint64(8734422),
        "packets_sent": uint64(781),
        "packets_recv": uint64(23456),
        "err_in": uint64(832),
        "err_out": uint64(8),
        "drop_in": uint64(7),
        "drop_out": uint64(1),
    }
    acc.AssertContainsTaggedFields(t, "net", fields1, ntags)

    fields2 := map[string]interface{}{
        "udp_noports": int64(892592),
        "udp_indatagrams": int64(4655),
    }
    acc.AssertContainsTaggedFields(t, "net", fields2, make(map[string]string))

    acc.Points = nil

    err = (&NetStats{&mps}).Gather(&acc)
    require.NoError(t, err)

    fields3 := map[string]interface{}{
        "tcp_established": 2,
        "tcp_syn_sent": 0,
        "tcp_syn_recv": 0,
        "tcp_fin_wait1": 0,
        "tcp_fin_wait2": 0,
        "tcp_time_wait": 0,
        "tcp_close": 1,
        "tcp_close_wait": 0,
        "tcp_last_ack": 0,
        "tcp_listen": 0,
        "tcp_closing": 0,
        "tcp_none": 0,
        "udp_socket": 1,
    }
    acc.AssertContainsTaggedFields(t, "netstat", fields3, make(map[string]string))
}

@@ -42,19 +42,23 @@ func (s *NetStats) Gather(acc plugins.Accumulator) error {
        }
        counts[netcon.Status] = c + 1
    }
    acc.Add("tcp_established", counts["ESTABLISHED"], tags)
    acc.Add("tcp_syn_sent", counts["SYN_SENT"], tags)
    acc.Add("tcp_syn_recv", counts["SYN_RECV"], tags)
    acc.Add("tcp_fin_wait1", counts["FIN_WAIT1"], tags)
    acc.Add("tcp_fin_wait2", counts["FIN_WAIT2"], tags)
    acc.Add("tcp_time_wait", counts["TIME_WAIT"], tags)
    acc.Add("tcp_close", counts["CLOSE"], tags)
    acc.Add("tcp_close_wait", counts["CLOSE_WAIT"], tags)
    acc.Add("tcp_last_ack", counts["LAST_ACK"], tags)
    acc.Add("tcp_listen", counts["LISTEN"], tags)
    acc.Add("tcp_closing", counts["CLOSING"], tags)
    acc.Add("tcp_none", counts["NONE"], tags)
    acc.Add("udp_socket", counts["UDP"], tags)

    fields := map[string]interface{}{
        "tcp_established": counts["ESTABLISHED"],
        "tcp_syn_sent": counts["SYN_SENT"],
        "tcp_syn_recv": counts["SYN_RECV"],
        "tcp_fin_wait1": counts["FIN_WAIT1"],
        "tcp_fin_wait2": counts["FIN_WAIT2"],
        "tcp_time_wait": counts["TIME_WAIT"],
        "tcp_close": counts["CLOSE"],
        "tcp_close_wait": counts["CLOSE_WAIT"],
        "tcp_last_ack": counts["LAST_ACK"],
        "tcp_listen": counts["LISTEN"],
        "tcp_closing": counts["CLOSING"],
        "tcp_none": counts["NONE"],
        "udp_socket": counts["UDP"],
    }
    acc.AddFields("netstat", fields, tags)

    return nil
}
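Because reading a missing key from a Go map yields the zero value, the explicit counts[...] lookups above emit a zero-valued field for every TCP state that never occurred, which is why the net test above expects zeros for states like tcp_listen. A small runnable sketch of that counting step (input statuses invented for illustration):

```go
package main

import "fmt"

func main() {
    counts := map[string]int{}
    for _, status := range []string{"ESTABLISHED", "ESTABLISHED", "CLOSE"} {
        counts[status]++
    }
    // Absent states read back as zero.
    fmt.Println(counts["ESTABLISHED"], counts["CLOSE"], counts["LISTEN"]) // 2 1 0
}
```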

@@ -19,13 +19,6 @@ func (_ *SystemStats) Description() string {

func (_ *SystemStats) SampleConfig() string { return "" }

func (_ *SystemStats) add(acc plugins.Accumulator,
    name string, val float64, tags map[string]string) {
    if val >= 0 {
        acc.Add(name, val, tags)
    }
}

func (_ *SystemStats) Gather(acc plugins.Accumulator) error {
    loadavg, err := load.LoadAvg()
    if err != nil {

@@ -37,11 +30,14 @@ func (_ *SystemStats) Gather(acc plugins.Accumulator) error {
        return err
    }

    acc.Add("load1", loadavg.Load1, nil)
    acc.Add("load5", loadavg.Load5, nil)
    acc.Add("load15", loadavg.Load15, nil)
    acc.Add("uptime", float64(hostinfo.Uptime), nil)
    acc.Add("uptime_format", format_uptime(hostinfo.Uptime), nil)
    fields := map[string]interface{}{
        "load1": loadavg.Load1,
        "load5": loadavg.Load5,
        "load15": loadavg.Load15,
        "uptime": hostinfo.Uptime,
        "uptime_format": format_uptime(hostinfo.Uptime),
    }
    acc.AddFields("system", fields, nil)

    return nil
}

@@ -1,426 +0,0 @@
package system

import (
    "fmt"
    "reflect"
    "syscall"
    "testing"

    "github.com/influxdb/telegraf/testutil"
    "github.com/shirou/gopsutil/cpu"
    "github.com/shirou/gopsutil/disk"
    "github.com/shirou/gopsutil/mem"
    "github.com/shirou/gopsutil/net"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestSystemStats_GenerateStats(t *testing.T) {
    var mps MockPS

    defer mps.AssertExpectations(t)

    var acc testutil.Accumulator

    cts := cpu.CPUTimesStat{
        CPU: "cpu0",
        User: 3.1,
        System: 8.2,
        Idle: 80.1,
        Nice: 1.3,
        Iowait: 0.2,
        Irq: 0.1,
        Softirq: 0.11,
        Steal: 0.0511,
        Guest: 8.1,
        GuestNice: 0.324,
    }

    cts2 := cpu.CPUTimesStat{
        CPU: "cpu0",
        User: 11.4, // increased by 8.3
        System: 10.9, // increased by 2.7
        Idle: 158.8699, // increased by 78.7699 (for total increase of 100)
        Nice: 2.5, // increased by 1.2
        Iowait: 0.7, // increased by 0.5
        Irq: 1.2, // increased by 1.1
        Softirq: 0.31, // increased by 0.2
        Steal: 0.2812, // increased by 0.2301
        Guest: 12.9, // increased by 4.8
        GuestNice: 2.524, // increased by 2.2
    }

    mps.On("CPUTimes").Return([]cpu.CPUTimesStat{cts}, nil)

    du := []*disk.DiskUsageStat{
        {
            Path: "/",
            Fstype: "ext4",
            Total: 128,
            Free: 23,
            InodesTotal: 1234,
            InodesFree: 234,
        },
        {
            Path: "/home",
            Fstype: "ext4",
            Total: 256,
            Free: 46,
            InodesTotal: 2468,
            InodesFree: 468,
        },
    }

    mps.On("DiskUsage").Return(du, nil)

    diskio1 := disk.DiskIOCountersStat{
        ReadCount: 888,
        WriteCount: 5341,
        ReadBytes: 100000,
        WriteBytes: 200000,
        ReadTime: 7123,
        WriteTime: 9087,
        Name: "sda1",
        IoTime: 123552,
        SerialNumber: "ab-123-ad",
    }
    diskio2 := disk.DiskIOCountersStat{
        ReadCount: 444,
        WriteCount: 2341,
        ReadBytes: 200000,
        WriteBytes: 400000,
        ReadTime: 3123,
        WriteTime: 6087,
        Name: "sdb1",
        IoTime: 246552,
        SerialNumber: "bb-123-ad",
    }

    mps.On("DiskIO").Return(map[string]disk.DiskIOCountersStat{"sda1": diskio1, "sdb1": diskio2}, nil)

    netio := net.NetIOCountersStat{
        Name: "eth0",
        BytesSent: 1123,
        BytesRecv: 8734422,
        PacketsSent: 781,
        PacketsRecv: 23456,
        Errin: 832,
        Errout: 8,
        Dropin: 7,
        Dropout: 1,
    }

    mps.On("NetIO").Return([]net.NetIOCountersStat{netio}, nil)

    netprotos := []net.NetProtoCountersStat{
        net.NetProtoCountersStat{
            Protocol: "Udp",
            Stats: map[string]int64{
                "InDatagrams": 4655,
                "NoPorts": 892592,
            },
        },
    }
    mps.On("NetProto").Return(netprotos, nil)

    vms := &mem.VirtualMemoryStat{
        Total: 12400,
        Available: 7600,
        Used: 5000,
        Free: 1235,
        // Active: 8134,
        // Inactive: 1124,
        // Buffers: 771,
        // Cached: 4312,
        // Wired: 134,
        // Shared: 2142,
    }

    mps.On("VMStat").Return(vms, nil)

    sms := &mem.SwapMemoryStat{
        Total: 8123,
        Used: 1232,
        Free: 6412,
        UsedPercent: 12.2,
        Sin: 7,
        Sout: 830,
    }

    mps.On("SwapStat").Return(sms, nil)

    netstats := []net.NetConnectionStat{
        net.NetConnectionStat{
            Type: syscall.SOCK_DGRAM,
        },
        net.NetConnectionStat{
            Status: "ESTABLISHED",
        },
        net.NetConnectionStat{
            Status: "ESTABLISHED",
        },
        net.NetConnectionStat{
            Status: "CLOSE",
        },
    }

    mps.On("NetConnections").Return(netstats, nil)

    cs := NewCPUStats(&mps)

    cputags := map[string]string{
        "cpu": "cpu0",
    }

    preCPUPoints := len(acc.Points)
    err := cs.Gather(&acc)
    require.NoError(t, err)
    numCPUPoints := len(acc.Points) - preCPUPoints

    expectedCPUPoints := 10
    assert.Equal(t, expectedCPUPoints, numCPUPoints)

    // Computed values are checked with delta > 0 because of floating point
    // arithmetic imprecision
    assertContainsTaggedFloat(t, &acc, "time_user", 3.1, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_iowait", 0.2, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_irq", 0.1, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_guest", 8.1, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0, cputags)

    mps2 := MockPS{}
    mps2.On("CPUTimes").Return([]cpu.CPUTimesStat{cts2}, nil)
    cs.ps = &mps2

    // Should have added cpu percentages too
    err = cs.Gather(&acc)
    require.NoError(t, err)

    numCPUPoints = len(acc.Points) - (preCPUPoints + numCPUPoints)
    expectedCPUPoints = 20
    assert.Equal(t, expectedCPUPoints, numCPUPoints)

    assertContainsTaggedFloat(t, &acc, "time_user", 11.4, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_idle", 158.8699, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_nice", 2.5, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_iowait", 0.7, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_guest", 12.9, 0, cputags)
    assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0, cputags)

    assertContainsTaggedFloat(t, &acc, "usage_user", 8.3, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_idle", 78.7699, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_nice", 1.2, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.5, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_irq", 1.1, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_guest", 4.8, 0.0005, cputags)
    assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005, cputags)

    preDiskPoints := len(acc.Points)

    err = (&DiskStats{ps: &mps}).Gather(&acc)
    require.NoError(t, err)

    numDiskPoints := len(acc.Points) - preDiskPoints
    expectedAllDiskPoints := 12
    assert.Equal(t, expectedAllDiskPoints, numDiskPoints)

    tags1 := map[string]string{
        "path": "/",
        "fstype": "ext4",
    }
    tags2 := map[string]string{
        "path": "/home",
        "fstype": "ext4",
    }

    assert.True(t, acc.CheckTaggedValue("total", uint64(128), tags1))
    assert.True(t, acc.CheckTaggedValue("used", uint64(105), tags1))
    assert.True(t, acc.CheckTaggedValue("free", uint64(23), tags1))
    assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(1234), tags1))
    assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(234), tags1))
    assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(1000), tags1))
    assert.True(t, acc.CheckTaggedValue("total", uint64(256), tags2))
    assert.True(t, acc.CheckTaggedValue("used", uint64(210), tags2))
    assert.True(t, acc.CheckTaggedValue("free", uint64(46), tags2))
    assert.True(t, acc.CheckTaggedValue("inodes_total", uint64(2468), tags2))
    assert.True(t, acc.CheckTaggedValue("inodes_free", uint64(468), tags2))
    assert.True(t, acc.CheckTaggedValue("inodes_used", uint64(2000), tags2))

    // We expect 6 more DiskPoints to show up with an explicit match on "/"
    // and /home not matching the /dev in Mountpoints
    err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/dev"}}).Gather(&acc)
    assert.Equal(t, preDiskPoints+expectedAllDiskPoints+6, len(acc.Points))

    // We should see all the diskpoints as Mountpoints includes both
    // / and /home
    err = (&DiskStats{ps: &mps, Mountpoints: []string{"/", "/home"}}).Gather(&acc)
    assert.Equal(t, preDiskPoints+2*expectedAllDiskPoints+6, len(acc.Points))

    err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc)
    require.NoError(t, err)

    ntags := map[string]string{
        "interface": "eth0",
    }

    assert.NoError(t, acc.ValidateTaggedValue("bytes_sent", uint64(1123), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("bytes_recv", uint64(8734422), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("packets_sent", uint64(781), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("packets_recv", uint64(23456), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("err_in", uint64(832), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("err_out", uint64(8), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("drop_in", uint64(7), ntags))
    assert.NoError(t, acc.ValidateTaggedValue("drop_out", uint64(1), ntags))
    assert.NoError(t, acc.ValidateValue("udp_noports", int64(892592)))
    assert.NoError(t, acc.ValidateValue("udp_indatagrams", int64(4655)))

    preDiskIOPoints := len(acc.Points)

    err = (&DiskIOStats{ps: &mps}).Gather(&acc)
    require.NoError(t, err)

    numDiskIOPoints := len(acc.Points) - preDiskIOPoints
    expectedAllDiskIOPoints := 14
    assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints)

    dtags1 := map[string]string{
        "name": "sda1",
        "serial": "ab-123-ad",
    }
    dtags2 := map[string]string{
        "name": "sdb1",
        "serial": "bb-123-ad",
    }

    assert.True(t, acc.CheckTaggedValue("reads", uint64(888), dtags1))
    assert.True(t, acc.CheckTaggedValue("writes", uint64(5341), dtags1))
    assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(100000), dtags1))
    assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(200000), dtags1))
    assert.True(t, acc.CheckTaggedValue("read_time", uint64(7123), dtags1))
    assert.True(t, acc.CheckTaggedValue("write_time", uint64(9087), dtags1))
    assert.True(t, acc.CheckTaggedValue("io_time", uint64(123552), dtags1))
    assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags2))
    assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags2))
    assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags2))
    assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags2))
    assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags2))
    assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
    assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))

    // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1"
    // and serial should be missing from the tags with SkipSerialNumber set
    err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
    assert.Equal(t, preDiskIOPoints+expectedAllDiskIOPoints+7, len(acc.Points))

    dtags3 := map[string]string{
        "name": "sdb1",
    }

    assert.True(t, acc.CheckTaggedValue("reads", uint64(444), dtags3))
    assert.True(t, acc.CheckTaggedValue("writes", uint64(2341), dtags3))
    assert.True(t, acc.CheckTaggedValue("read_bytes", uint64(200000), dtags3))
    assert.True(t, acc.CheckTaggedValue("write_bytes", uint64(400000), dtags3))
    assert.True(t, acc.CheckTaggedValue("read_time", uint64(3123), dtags3))
    assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags3))
    assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags3))

    err = (&MemStats{&mps}).Gather(&acc)
    require.NoError(t, err)

    vmtags := map[string]string(nil)

    assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags))
    assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags))
    assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags))
    assert.True(t, acc.CheckTaggedValue("available_percent",
        float64(7600)/float64(12400)*100,
        vmtags))
    assert.True(t, acc.CheckTaggedValue("used_percent",
        float64(5000)/float64(12400)*100,
        vmtags))
    assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags))

    acc.Points = nil

    err = (&SwapStats{&mps}).Gather(&acc)
    require.NoError(t, err)

    swaptags := map[string]string(nil)

    assert.NoError(t, acc.ValidateTaggedValue("total", uint64(8123), swaptags))
    assert.NoError(t, acc.ValidateTaggedValue("used", uint64(1232), swaptags))
    assert.NoError(t, acc.ValidateTaggedValue("used_percent", float64(12.2), swaptags))
    assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags))
    assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags))
    assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags))

    acc.Points = nil

    err = (&NetStats{&mps}).Gather(&acc)
    require.NoError(t, err)
    netstattags := map[string]string(nil)

    assert.NoError(t, acc.ValidateTaggedValue("tcp_established", 2, netstattags))
    assert.NoError(t, acc.ValidateTaggedValue("tcp_close", 1, netstattags))
    assert.NoError(t, acc.ValidateTaggedValue("udp_socket", 1, netstattags))

}

// Asserts that a given accumulator contains a measurement of type float64 with
// specific tags within a certain distance of a given expected value. Asserts a failure
// if the measurement is of the wrong type, or if no matching measurements are found
//
// Parameters:
// t *testing.T : Testing object to use
// acc testutil.Accumulator: Accumulator to examine
// measurement string : Name of the measurement to examine
// expectedValue float64 : Value to search for within the measurement
// delta float64 : Maximum acceptable distance of an accumulated value
// from the expectedValue parameter. Useful when
// floating-point arithmetic imprecision makes looking
// for an exact match impractical
// tags map[string]string : Tag set the found measurement must have. Set to nil to
// ignore the tag set.
func assertContainsTaggedFloat(
    t *testing.T,
    acc *testutil.Accumulator,
    measurement string,
    expectedValue float64,
    delta float64,
    tags map[string]string,
) {
    var actualValue float64
    for _, pt := range acc.Points {
        if pt.Measurement == measurement {
            if (tags == nil) || reflect.DeepEqual(pt.Tags, tags) {
                if value, ok := pt.Fields["value"].(float64); ok {
                    actualValue = value
                    if (value >= expectedValue-delta) && (value <= expectedValue+delta) {
                        // Found the point, return without failing
                        return
                    }
                } else {
                    assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64",
                        measurement))
                }

            }
        }
    }
    msg := fmt.Sprintf("Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f",
        measurement, delta, expectedValue, actualValue)
    assert.Fail(t, msg)
}

@@ -41,6 +41,5 @@ func (s *Trig) Gather(acc plugins.Accumulator) error {
}

func init() {

    plugins.Add("Trig", func() plugins.Plugin { return &Trig{x: 0.0} })
}

@@ -5,28 +5,21 @@ import (
    "errors"
    "io/ioutil"
    "net"
    "strings"
    "sync"
    "time"

    "github.com/influxdb/telegraf/plugins"
)

type Twemproxy struct {
    Instances []TwemproxyInstance
}

type TwemproxyInstance struct {
    Addr string
    Pools []string
}

var sampleConfig = `
[[plugins.twemproxy.instances]]
# Twemproxy stats address and port (no scheme)
addr = "localhost:22222"
# Monitor pool name
pools = ["redis_pool", "mc_pool"]
# Twemproxy stats address and port (no scheme)
addr = "localhost:22222"
# Monitor pool name
pools = ["redis_pool", "mc_pool"]
`

func (t *Twemproxy) SampleConfig() string {

@@ -39,35 +32,7 @@ func (t *Twemproxy) Description() string {

// Gather data from all Twemproxy instances
func (t *Twemproxy) Gather(acc plugins.Accumulator) error {
    var wg sync.WaitGroup
    errorChan := make(chan error, len(t.Instances))
    for _, inst := range t.Instances {
        wg.Add(1)
        go func(inst TwemproxyInstance) {
            defer wg.Done()
            if err := inst.Gather(acc); err != nil {
                errorChan <- err
            }
        }(inst)
    }
    wg.Wait()

    close(errorChan)
    errs := []string{}
    for err := range errorChan {
        errs = append(errs, err.Error())
    }
    if len(errs) == 0 {
        return nil
    }
    return errors.New(strings.Join(errs, "\n"))
}

// Gather data from one Twemproxy
func (ti *TwemproxyInstance) Gather(
    acc plugins.Accumulator,
) error {
    conn, err := net.DialTimeout("tcp", ti.Addr, 1*time.Second)
    conn, err := net.DialTimeout("tcp", t.Addr, 1*time.Second)
    if err != nil {
        return err
    }

@@ -82,14 +47,14 @@ func (ti *TwemproxyInstance) Gather(
    }

    tags := make(map[string]string)
    tags["twemproxy"] = ti.Addr
    ti.processStat(acc, tags, stats)
    tags["twemproxy"] = t.Addr
    t.processStat(acc, tags, stats)

    return nil
}

// Process Twemproxy server stats
func (ti *TwemproxyInstance) processStat(
func (t *Twemproxy) processStat(
    acc plugins.Accumulator,
    tags map[string]string,
    data map[string]interface{},

@@ -100,40 +65,42 @@ func (ti *TwemproxyInstance) processStat(
        }
    }

    fields := make(map[string]interface{})
    metrics := []string{"total_connections", "curr_connections", "timestamp"}
    for _, m := range metrics {
        if value, ok := data[m]; ok {
            if val, ok := value.(float64); ok {
                acc.Add(m, val, tags)
                fields[m] = val
            }
        }
    }
    acc.AddFields("twemproxy", fields, tags)

    for _, pool := range ti.Pools {
    for _, pool := range t.Pools {
        if poolStat, ok := data[pool]; ok {
            if data, ok := poolStat.(map[string]interface{}); ok {
                poolTags := copyTags(tags)
                poolTags["pool"] = pool
                ti.processPool(acc, poolTags, pool+"_", data)
                t.processPool(acc, poolTags, data)
            }
        }
    }
}

// Process pool data in Twemproxy stats
func (ti *TwemproxyInstance) processPool(
func (t *Twemproxy) processPool(
    acc plugins.Accumulator,
    tags map[string]string,
    prefix string,
    data map[string]interface{},
) {
    serverTags := make(map[string]map[string]string)

    fields := make(map[string]interface{})
    for key, value := range data {
        switch key {
        case "client_connections", "forward_error", "client_err", "server_ejects", "fragments", "client_eof":
            if val, ok := value.(float64); ok {
                acc.Add(prefix+key, val, tags)
                fields[key] = val
            }
        default:
            if data, ok := value.(map[string]interface{}); ok {

@@ -141,27 +108,29 @@ func (ti *TwemproxyInstance) processPool(
                serverTags[key] = copyTags(tags)
                serverTags[key]["server"] = key
            }
            ti.processServer(acc, serverTags[key], prefix, data)
            t.processServer(acc, serverTags[key], data)
        }
        }
    }
    acc.AddFields("twemproxy_pool", fields, tags)
}

// Process backend server(redis/memcached) stats
func (ti *TwemproxyInstance) processServer(
func (t *Twemproxy) processServer(
    acc plugins.Accumulator,
    tags map[string]string,
    prefix string,
    data map[string]interface{},
) {
    fields := make(map[string]interface{})
    for key, value := range data {
        switch key {
        default:
            if val, ok := value.(float64); ok {
                acc.Add(prefix+key, val, tags)
                fields[key] = val
            }
        }
    }
    acc.AddFields("twemproxy_pool", fields, tags)
}

// Tags is not expected to be mutated after passing to Add.

@@ -88,15 +88,15 @@ func gatherPoolStats(pool poolInfo, acc plugins.Accumulator) error {
    }

    tag := map[string]string{"pool": pool.name}

    fields := make(map[string]interface{})
    for i := 0; i < keyCount; i++ {
        value, err := strconv.ParseInt(values[i], 10, 64)
        if err != nil {
            return err
        }

        acc.Add(keys[i], value, tag)
        fields[keys[i]] = value
    }
    acc.AddFields("zfs_pool", fields, tag)

    return nil
}

@@ -124,6 +124,7 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error {
        }
    }

    fields := make(map[string]interface{})
    for _, metric := range kstatMetrics {
        lines, err := internal.ReadLines(kstatPath + "/" + metric)
        if err != nil {

@@ -140,9 +141,10 @@ func (z *Zfs) Gather(acc plugins.Accumulator) error {
            key := metric + "_" + rawData[0]
            rawValue := rawData[len(rawData)-1]
            value, _ := strconv.ParseInt(rawValue, 10, 64)
            acc.Add(key, value, tags)
            fields[key] = value
        }
    }
    acc.AddFields("zfs", fields, tags)
    return nil
}

@@ -67,35 +67,37 @@ func (z *Zookeeper) gatherServer(address string, acc plugins.Accumulator) error
    defer c.Close()

    fmt.Fprintf(c, "%s\n", "mntr")

    rdr := bufio.NewReader(c)

    scanner := bufio.NewScanner(rdr)

    service := strings.Split(address, ":")
    if len(service) != 2 {
        return fmt.Errorf("Invalid service address: %s", address)
    }
    tags := map[string]string{"server": service[0], "port": service[1]}

    fields := make(map[string]interface{})
    for scanner.Scan() {
        line := scanner.Text()

        re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
        parts := re.FindStringSubmatch(string(line))

        service := strings.Split(address, ":")

        if len(parts) != 3 || len(service) != 2 {
        if len(parts) != 3 {
            return fmt.Errorf("unexpected line in mntr response: %q", line)
        }

        tags := map[string]string{"server": service[0], "port": service[1]}

        measurement := strings.TrimPrefix(parts[1], "zk_")
        sValue := string(parts[2])

        iVal, err := strconv.ParseInt(sValue, 10, 64)
        if err == nil {
            acc.Add(measurement, iVal, tags)
            fields[measurement] = iVal
        } else {
            acc.Add(measurement, sValue, tags)
            fields[measurement] = sValue
        }
    }
    acc.AddFields("zookeeper", fields, tags)

    return nil
}
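For context, a ZooKeeper `mntr` response is plain `zk_<name> <value>` lines; the regex above captures the name and the value. A runnable sketch of that parse against one sample line (the sample line is illustrative):

```go
package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`)
    parts := re.FindStringSubmatch("zk_avg_latency 0")
    if len(parts) == 3 {
        iVal, _ := strconv.ParseInt(parts[2], 10, 64)
        fmt.Println(parts[1], iVal) // avg_latency 0
    }
}
```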

@@ -4,7 +4,10 @@ import (
    "fmt"
    "reflect"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

// Point defines a single point measurement

@@ -106,71 +109,24 @@ func (a *Accumulator) Get(measurement string) (*Point, bool) {
    return nil, false
}

// CheckValue calls CheckFieldsValue passing a single-value map as fields
func (a *Accumulator) CheckValue(measurement string, val interface{}) bool {
    return a.CheckFieldsValue(measurement, map[string]interface{}{"value": val})
}

// CheckValue checks that the accumulator's point for the given measurement
// is the same as the given value.
func (a *Accumulator) CheckFieldsValue(measurement string, fields map[string]interface{}) bool {
    for _, p := range a.Points {
        if p.Measurement == measurement {
            if reflect.DeepEqual(fields, p.Fields) {
                return true
            } else {
                fmt.Printf("Measurement %s Failure, expected: %v, got %v\n",
                    measurement, fields, p.Fields)
                return false
            }
// NFields returns the total number of fields in the accumulator, across all
// measurements
func (a *Accumulator) NFields() int {
    counter := 0
    for _, pt := range a.Points {
        for _, _ = range pt.Fields {
            counter++
        }
    }
    fmt.Printf("Measurement %s, fields %s not found\n", measurement, fields)
    return false
    return counter
}

// CheckTaggedValue calls ValidateTaggedValue
func (a *Accumulator) CheckTaggedValue(
    measurement string,
    val interface{},
    tags map[string]string,
) bool {
    return a.ValidateTaggedValue(measurement, val, tags) == nil
}

// ValidateTaggedValue calls ValidateTaggedFieldsValue passing a single-value map as fields
func (a *Accumulator) ValidateTaggedValue(
    measurement string,
    val interface{},
    tags map[string]string,
) error {
    return a.ValidateTaggedFieldsValue(measurement, map[string]interface{}{"value": val}, tags)
}

// ValidateValue calls ValidateTaggedValue
func (a *Accumulator) ValidateValue(measurement string, val interface{}) error {
    return a.ValidateTaggedValue(measurement, val, nil)
}

// CheckTaggedFieldsValue calls ValidateTaggedFieldsValue
func (a *Accumulator) CheckTaggedFieldsValue(
func (a *Accumulator) AssertContainsTaggedFields(
    t *testing.T,
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
) bool {
    return a.ValidateTaggedFieldsValue(measurement, fields, tags) == nil
}

// ValidateTaggedValue validates that the given measurement and value exist
// in the accumulator and with the given tags.
func (a *Accumulator) ValidateTaggedFieldsValue(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
) error {
    if tags == nil {
        tags = map[string]string{}
    }
) {
    for _, p := range a.Points {
        if !reflect.DeepEqual(tags, p.Tags) {
            continue

@@ -178,53 +134,46 @@ func (a *Accumulator) ValidateTaggedFieldsValue(

        if p.Measurement == measurement {
            if !reflect.DeepEqual(fields, p.Fields) {
                return fmt.Errorf("%v != %v ", fields, p.Fields)
            }
            return nil
        }
    }

    return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags)
}

// ValidateFieldsValue calls ValidateTaggedFieldsValue
func (a *Accumulator) ValidateFieldsValue(
    measurement string,
    fields map[string]interface{},
) error {
    return a.ValidateTaggedValue(measurement, fields, nil)
}

func (a *Accumulator) ValidateTaggedFields(
    measurement string,
    fields map[string]interface{},
    tags map[string]string,
) error {
    if tags == nil {
        tags = map[string]string{}
    }
    for _, p := range a.Points {
        if !reflect.DeepEqual(tags, p.Tags) {
            continue
        }

        if p.Measurement == measurement {
            if !reflect.DeepEqual(fields, p.Fields) {
                return fmt.Errorf("%v (%T) != %v (%T)",
                msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)",
                    p.Fields, p.Fields, fields, fields)
                assert.Fail(t, msg)
            }
            return nil
            return
        }
    }
    return fmt.Errorf("unknown measurement %s with tags %v", measurement, tags)
    msg := fmt.Sprintf("unknown measurement %s with tags %v", measurement, tags)
    assert.Fail(t, msg)
}

func (a *Accumulator) AssertContainsFields(
    t *testing.T,
    measurement string,
    fields map[string]interface{},
) {
    for _, p := range a.Points {
        if p.Measurement == measurement {
            if !reflect.DeepEqual(fields, p.Fields) {
                msg := fmt.Sprintf("Actual:\n %v (%T) \nExpected:\n %v (%T)",
                    p.Fields, p.Fields, fields, fields)
                assert.Fail(t, msg)
            }
            return
        }
    }
    msg := fmt.Sprintf("unknown measurement %s", measurement)
    assert.Fail(t, msg)
}
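Note that AssertContainsTaggedFields compares the full fields map with reflect.DeepEqual, so callers must pass every field of the expected point, not a subset, and pass/fail handling now happens inside the helper instead of via returned booleans or errors. Typical usage, with values from the swap test above (a fragment assumed to sit inside a test with `t` and `acc` in scope):

```go
swapfields := map[string]interface{}{
    "total":        uint64(8123),
    "used":         uint64(1232),
    "used_percent": float64(12.2),
    "free":         uint64(6412),
    "in":           uint64(7),
    "out":          uint64(830),
}
// Fails the test via assert.Fail if the "swap" point is missing
// or its field set differs in any way.
acc.AssertContainsTaggedFields(t, "swap", swapfields, make(map[string]string))
```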

// HasIntValue returns true if the measurement has an Int value
func (a *Accumulator) HasIntValue(measurement string) bool {
func (a *Accumulator) HasIntField(measurement string, field string) bool {
    for _, p := range a.Points {
        if p.Measurement == measurement {
            _, ok := p.Fields["value"].(int64)
            return ok
            for fieldname, value := range p.Fields {
                if fieldname == field {
                    _, ok := value.(int64)
                    return ok
                }
            }
        }
    }

@@ -232,11 +181,15 @@ func (a *Accumulator) HasIntValue(measurement string) bool {
}

// HasUIntValue returns true if the measurement has a UInt value
func (a *Accumulator) HasUIntValue(measurement string) bool {
func (a *Accumulator) HasUIntField(measurement string, field string) bool {
    for _, p := range a.Points {
        if p.Measurement == measurement {
            _, ok := p.Fields["value"].(uint64)
            return ok
            for fieldname, value := range p.Fields {
                if fieldname == field {
                    _, ok := value.(uint64)
                    return ok
                }
            }
        }
    }

@@ -244,11 +197,15 @@ func (a *Accumulator) HasUIntValue(measurement string) bool {
}

// HasFloatValue returns true if the given measurement has a float value
func (a *Accumulator) HasFloatValue(measurement string) bool {
func (a *Accumulator) HasFloatField(measurement string, field string) bool {
    for _, p := range a.Points {
        if p.Measurement == measurement {
            _, ok := p.Fields["value"].(float64)
            return ok
            for fieldname, value := range p.Fields {
                if fieldname == field {
                    _, ok := value.(float64)
                    return ok
                }
            }
        }
    }