Merge branch 'master' into fleetdplugin

This commit is contained in:
JPW 2016-08-09 09:53:49 -04:00 committed by GitHub
commit 914c2d4b08
15 changed files with 947 additions and 237 deletions

View File

@ -80,6 +80,7 @@ consistent with the behavior of `collection_jitter`.
- [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument.
- [#1213](https://github.com/influxdata/telegraf/issues/1213): Add inactive & active memory to mem plugin.
- [#1543](https://github.com/influxdata/telegraf/pull/1543): Official Windows service.
- [#1414](https://github.com/influxdata/telegraf/pull/1414): Forking sensors command to remove C package dependency.
### Bugfixes
@ -125,6 +126,8 @@ consistent with the behavior of `collection_jitter`.
- [#1323](https://github.com/influxdata/telegraf/issues/1323): Processes plugin: fix potential error with /proc/net/stat directory.
- [#1322](https://github.com/influxdata/telegraf/issues/1322): Fix rare RHEL 5.2 panic in gopsutil diskio gathering function.
- [#1586](https://github.com/influxdata/telegraf/pull/1586): Remove IF NOT EXISTS from influxdb output database creation.
- [#1600](https://github.com/influxdata/telegraf/issues/1600): Fix quoting with text values in postgresql_extensible plugin.
- [#1425](https://github.com/influxdata/telegraf/issues/1425): Fix win_perf_counter "index out of range" panic.
## v0.13.1 [2016-05-24]

View File

@ -188,7 +188,7 @@ Currently implemented sources:
* [redis](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis)
* [rethinkdb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rethinkdb)
* [riak](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/riak)
* [sensors ](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors) (only available if built from source)
* [sensors](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sensors)
* [snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp)
* [sql server](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver) (microsoft)
* [twemproxy](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/twemproxy)

View File

@ -0,0 +1,36 @@
# Ping input plugin
This input plugin measures the round-trip time of ping requests by executing the system `ping` command.
## Windows:
### Configuration:
```
## urls to ping
urls = ["www.google.com"] # required
## number of pings to send per collection (ping -n <COUNT>)
count = 4 # required
## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
Timeout = 0
```
### Measurements & Fields:
- packets_transmitted ( from ping output )
- reply_received ( incremented only on a valid echo reply; e.g. a 'Destination net unreachable' reply increments packets_received but not reply_received )
- packets_received ( from ping output )
- percent_reply_loss ( computed from packets_transmitted and reply_received )
- percent_packet_loss ( computed from packets_transmitted and packets_received; see the sketch below )
- errors ( set when the host cannot be found or wrong parameters are passed to the application )
- response time
    - average_response_ms ( from ping output )
    - minimum_response_ms ( from ping output )
    - maximum_response_ms ( from ping output )
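Both loss percentages are computed relative to the number of transmitted packets. A minimal standalone sketch of that arithmetic (the function and variable names here are illustrative, not the plugin's identifiers):

```go
package main

import "fmt"

// lossPercentages mirrors the arithmetic in ping.go: both percentages are
// relative to the number of transmitted packets.
func lossPercentages(transmitted, packetsReceived, repliesReceived int) (packetLoss, replyLoss float64) {
	packetLoss = float64(transmitted-packetsReceived) / float64(transmitted) * 100.0
	replyLoss = float64(transmitted-repliesReceived) / float64(transmitted) * 100.0
	return packetLoss, replyLoss
}

func main() {
	// 4 packets sent, 1 packet came back, but it was "Destination net
	// unreachable", so there were 0 valid echo replies.
	packetLoss, replyLoss := lossPercentages(4, 1, 0)
	fmt.Printf("percent_packet_loss=%v percent_reply_loss=%v\n", packetLoss, replyLoss)
	// percent_packet_loss=75 percent_reply_loss=100
}
```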
### Tags:
- server
### Example Output:
```
* Plugin: ping, Collection 1
ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
```

View File

@ -65,16 +65,20 @@ func hostPinger(timeout float64, args ...string) (string, error) {
// processPingOutput takes in a string output from the ping command
// based on the linux implementation, but using regexes for multi-language support (this should not affect performance)
// It returns (<transmitted packets>, <received packets>, <average response>, <min response>, <max response>)
func processPingOutput(out string) (int, int, int, int, int, error) {
// It returns (<transmitted packets>, <received reply>, <received packet>, <average response>, <min response>, <max response>)
func processPingOutput(out string) (int, int, int, int, int, int, error) {
// Find the lines containing 3 numbers, skipping the reply lines
var stats, aproxs []string = nil, nil
err := errors.New("Fatal error processing ping output")
stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)
tttLine := regexp.MustCompile(`TTL=\d+`)
lines := strings.Split(out, "\n")
var receivedReply int = 0
for _, line := range lines {
if !strings.Contains(line, "TTL") {
if tttLine.MatchString(line) {
receivedReply++
} else {
if stats == nil {
stats = stat.FindStringSubmatch(line)
}
@ -86,35 +90,35 @@ func processPingOutput(out string) (int, int, int, int, int, error) {
// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
if len(stats) != 4 {
return 0, 0, 0, 0, 0, err
return 0, 0, 0, 0, 0, 0, err
}
trans, err := strconv.Atoi(stats[1])
if err != nil {
return 0, 0, 0, 0, 0, err
return 0, 0, 0, 0, 0, 0, err
}
rec, err := strconv.Atoi(stats[2])
receivedPacket, err := strconv.Atoi(stats[2])
if err != nil {
return 0, 0, 0, 0, 0, err
return 0, 0, 0, 0, 0, 0, err
}
// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
if len(aproxs) != 4 {
return trans, rec, 0, 0, 0, err
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
min, err := strconv.Atoi(aproxs[1])
if err != nil {
return trans, rec, 0, 0, 0, err
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
max, err := strconv.Atoi(aproxs[2])
if err != nil {
return trans, rec, 0, 0, 0, err
return trans, receivedReply, receivedPacket, 0, 0, 0, err
}
avg, err := strconv.Atoi(aproxs[3])
if err != nil {
return 0, 0, 0, 0, 0, err
return 0, 0, 0, 0, 0, 0, err
}
return trans, rec, avg, min, max, err
return trans, receivedReply, receivedPacket, avg, min, max, err
}
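The two regular expressions above are locale-independent: they only look at the positions of the numbers, not at the surrounding words. A small standalone sketch, using the same patterns against an English statistics block:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same patterns processPingOutput uses: one for the
	// "Sent/Received/Lost" line, one for the "Minimum/Maximum/Average" line.
	stat := regexp.MustCompile(`=\W*(\d+)\D*=\W*(\d+)\D*=\W*(\d+)`)
	aprox := regexp.MustCompile(`=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms\D*=\W*(\d+)\D*ms`)

	stats := stat.FindStringSubmatch("    Packets: Sent = 4, Received = 4, Lost = 0 (0% loss),")
	aproxs := aprox.FindStringSubmatch("    Minimum = 46ms, Maximum = 57ms, Average = 50ms")

	// stats[1..3] = Sent, Received, Lost; aproxs[1..3] = Minimum, Maximum, Average.
	fmt.Println(stats[1:], aproxs[1:])
	// Output: [4 4 0] [46 57 50]
}
```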
func (p *Ping) timeout() float64 {
@ -159,21 +163,30 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error())
}
tags := map[string]string{"url": u}
trans, rec, avg, min, max, err := processPingOutput(out)
trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out)
if err != nil {
// fatal error
if pendingError != nil {
errorChannel <- pendingError
}
errorChannel <- err
fields := map[string]interface{}{
"errors": 100.0,
}
acc.AddFields("ping", fields, tags)
return
}
// Calculate packet loss percentage
loss := float64(trans-rec) / float64(trans) * 100.0
lossReply := float64(trans-recReply) / float64(trans) * 100.0
lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0
fields := map[string]interface{}{
"packets_transmitted": trans,
"packets_received": rec,
"percent_packet_loss": loss,
"reply_received": recReply,
"packets_received": receivePacket,
"percent_packet_loss": lossPackets,
"percent_reply_loss": lossReply,
}
if avg > 0 {
fields["average_response_ms"] = avg

View File

@ -38,18 +38,20 @@ Approximate round trip times in milli-seconds:
`
func TestHost(t *testing.T) {
trans, rec, avg, min, max, err := processPingOutput(winPLPingOutput)
trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, rec, "4 packets were received")
assert.Equal(t, 4, recReply, "4 replies were received")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 46, min, "Min 46")
assert.Equal(t, 57, max, "max 57")
trans, rec, avg, min, max, err = processPingOutput(winENPingOutput)
trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput)
assert.NoError(t, err)
assert.Equal(t, 4, trans, "4 packets were transmitted")
assert.Equal(t, 4, rec, "4 packets were received")
assert.Equal(t, 4, recReply, "4 replies were received")
assert.Equal(t, 4, recPacket, "4 packets were received")
assert.Equal(t, 50, avg, "Average 50")
assert.Equal(t, 50, min, "Min 50")
assert.Equal(t, 52, max, "Max 52")
@ -72,7 +74,9 @@ func TestPingGather(t *testing.T) {
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 4,
"reply_received": 4,
"percent_packet_loss": 0.0,
"percent_reply_loss": 0.0,
"average_response_ms": 50,
"minimum_response_ms": 50,
"maximum_response_ms": 52,
@ -113,7 +117,9 @@ func TestBadPingGather(t *testing.T) {
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 0,
"reply_received": 0,
"percent_packet_loss": 100.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
}
@ -154,7 +160,9 @@ func TestLossyPingGather(t *testing.T) {
fields := map[string]interface{}{
"packets_transmitted": 9,
"packets_received": 7,
"reply_received": 7,
"percent_packet_loss": 22.22222222222222,
"percent_reply_loss": 22.22222222222222,
"average_response_ms": 115,
"minimum_response_ms": 114,
"maximum_response_ms": 119,
@ -207,12 +215,114 @@ func TestFatalPingGather(t *testing.T) {
}
p.Gather(&acc)
assert.False(t, acc.HasMeasurement("packets_transmitted"),
assert.True(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should have packet measurements")
assert.False(t, acc.HasIntField("ping", "packets_transmitted"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("packets_received"),
assert.False(t, acc.HasIntField("ping", "packets_received"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("percent_packet_loss"),
assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasMeasurement("average_response_ms"),
assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
var UnreachablePingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 194.204.175.50: Destination net unreachable.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) {
return UnreachablePingOutput, errors.New("So very bad")
}
//Reply from 185.28.251.217: TTL expired in transit.
// In the 'Destination net unreachable' case ping reports a received packet, but the reply
// does not contain a valid metric, so it is treated as lost
func TestUnreachablePingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockUnreachableHostPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}
var TTLExpiredPingOutput = `
Pinging www.google.pl [8.8.8.8] with 32 bytes of data:
Request timed out.
Request timed out.
Reply from 185.28.251.217: TTL expired in transit.
Request timed out.
Ping statistics for 8.8.8.8:
Packets: Sent = 4, Received = 1, Lost = 3 (75% loss),
`
func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) {
return TTLExpiredPingOutput, errors.New("So very bad")
}
// Similarly, a 'TTL expired in transit' reply counts as a received packet, but it
// does not contain a valid metric, so it is treated as lost
func TestTTLExpiredPingGather(t *testing.T) {
var acc testutil.Accumulator
p := Ping{
Urls: []string{"www.google.com"},
pingHost: mockTTLExpiredPinger,
}
p.Gather(&acc)
tags := map[string]string{"url": "www.google.com"}
fields := map[string]interface{}{
"packets_transmitted": 4,
"packets_received": 1,
"reply_received": 0,
"percent_packet_loss": 75.0,
"percent_reply_loss": 100.0,
}
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
assert.False(t, acc.HasFloatField("ping", "errors"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "average_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "maximum_response_ms"),
"Fatal ping should not have packet measurements")
assert.False(t, acc.HasIntField("ping", "minimum_response_ms"),
"Fatal ping should not have packet measurements")
}

View File

@ -266,29 +266,33 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula
tags := map[string]string{}
tags["server"] = tagAddress
tags["db"] = dbname.String()
var isATag int
fields := make(map[string]interface{})
COLUMN:
for col, val := range columnMap {
if acc.Debug() {
log.Printf("postgresql_extensible: column: %s = %T: %s\n", col, *val, *val)
}
_, ignore := ignoredColumns[col]
if !ignore && *val != nil {
isATag = 0
for tag := range p.AdditionalTags {
if col == p.AdditionalTags[tag] {
isATag = 1
value_type_p := fmt.Sprintf(`%T`, *val)
if value_type_p == "[]uint8" {
tags[col] = fmt.Sprintf(`%s`, *val)
} else if value_type_p == "int64" {
tags[col] = fmt.Sprintf(`%v`, *val)
}
}
if ignore || *val == nil {
continue
}
for _, tag := range p.AdditionalTags {
if col != tag {
continue
}
if isATag == 0 {
fields[col] = *val
switch v := (*val).(type) {
case []byte:
tags[col] = string(v)
case int64:
tags[col] = fmt.Sprintf("%d", v)
}
continue COLUMN
}
if v, ok := (*val).([]byte); ok {
fields[col] = string(v)
} else {
fields[col] = *val
}
}
acc.AddFields(meas_name, fields, tags)
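The refactored loop above can be read as one conversion rule per column: additional-tag columns become tags, everything else becomes a field, and []byte values are turned into strings in both cases. A standalone sketch of that rule, with hypothetical column names (not taken from the plugin's queries):

```go
package main

import "fmt"

// toTagOrField illustrates the conversion the refactored accRow performs:
// columns listed as additional tags are stringified into tags ([]byte and
// int64 are the types handled there), everything else becomes a field, with
// []byte converted to string. Standalone sketch, not the plugin's function.
func toTagOrField(col string, val interface{}, tagCols map[string]bool,
	tags map[string]string, fields map[string]interface{}) {
	if tagCols[col] {
		switch v := val.(type) {
		case []byte:
			tags[col] = string(v)
		case int64:
			tags[col] = fmt.Sprintf("%d", v)
		}
		return
	}
	if v, ok := val.([]byte); ok {
		fields[col] = string(v)
		return
	}
	fields[col] = val
}

func main() {
	tags := map[string]string{}
	fields := map[string]interface{}{}
	tagCols := map[string]bool{"datname": true} // hypothetical tag column

	toTagOrField("datname", []byte("postgres"), tagCols, tags, fields)
	toTagOrField("numbackends", int64(3), tagCols, tags, fields)
	toTagOrField("state", []byte("idle in transaction"), tagCols, tags, fields)

	fmt.Println(tags, fields)
	// map[datname:postgres] map[numbackends:3 state:idle in transaction]
}
```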

View File

@ -0,0 +1,47 @@
# sensors Input Plugin
Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - requires the lm-sensors
package to be installed.
This plugin collects sensor metrics with the `sensors` executable from the lm-sensors package.
### Configuration:
```
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
```
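With `remove_numbers = true` the plugin simply strips digits from the field names it reads from `sensors`. A small standalone sketch of that renaming (the regexp mirrors the one used in sensors.go):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Digits are removed from field names, so temp1_input and temp2_input
	// collapse into a single temp_input field per feature.
	numberRe := regexp.MustCompile("[0-9]+")
	for _, name := range []string{"temp1_input", "temp2_crit_alarm", "power1_average_interval"} {
		fmt.Printf("%s -> %s\n", name, numberRe.ReplaceAllString(name, ""))
	}
	// temp1_input -> temp_input
	// temp2_crit_alarm -> temp_crit_alarm
	// power1_average_interval -> power_average_interval
}
```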
### Measurements & Fields:
Fields are created dynamically depending on the detected sensors. All fields are floats.
### Tags:
- All measurements have the following tags:
- chip
- feature
### Example Output:
#### Default
```
$ telegraf -config telegraf.conf -input-filter sensors -test
* Plugin: sensors, Collection 1
> sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000
> sensors,chip=k10temp-pci-00c3,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00cb,feature=temp1 temp_input=29,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00d3,feature=temp1 temp_input=27.5,temp_max=70 1466751326000000000
> sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000
```
#### With remove_numbers=false
```
* Plugin: sensors, Collection 1
> sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000
> sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00cb,feature=temp1 temp1_input=29,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00d3,feature=temp1 temp1_input=29.5,temp1_max=70 1466753424000000000
> sensors,chip=k10temp-pci-00db,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=30,temp1_max=70 1466753424000000000
```

View File

@ -1,91 +1,118 @@
// +build linux,sensors
// +build linux
package sensors
import (
"errors"
"fmt"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/md14454/gosensors"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
var (
execCommand = exec.Command // execCommand is used to mock commands in tests.
numberRegp = regexp.MustCompile("[0-9]+")
)
type Sensors struct {
Sensors []string
RemoveNumbers bool `toml:"remove_numbers"`
path string
}
func (_ *Sensors) Description() string {
return "Monitor sensors using lm-sensors package"
func (*Sensors) Description() string {
return "Monitor sensors, requires lm-sensors package"
}
var sensorsSampleConfig = `
## By default, telegraf gathers stats from all sensors detected by the
## lm-sensors module.
##
## Only collect stats from the selected sensors. Sensors are listed as
## <chip name>:<feature name>. This information can be found by running the
## sensors command, e.g. sensors -u
##
## A * as the feature name will return all features of the chip
##
# sensors = ["coretemp-isa-0000:Core 0", "coretemp-isa-0001:*"]
func (*Sensors) SampleConfig() string {
return `
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
`
func (_ *Sensors) SampleConfig() string {
return sensorsSampleConfig
}
func (s *Sensors) Gather(acc telegraf.Accumulator) error {
gosensors.Init()
defer gosensors.Cleanup()
for _, chip := range gosensors.GetDetectedChips() {
for _, feature := range chip.GetFeatures() {
chipName := chip.String()
featureLabel := feature.GetLabel()
if len(s.Sensors) != 0 {
var found bool
for _, sensor := range s.Sensors {
parts := strings.SplitN(sensor, ":", 2)
if parts[0] == chipName {
if parts[1] == "*" || parts[1] == featureLabel {
found = true
break
}
}
}
if !found {
continue
}
}
tags := map[string]string{
"chip": chipName,
"adapter": chip.AdapterName(),
"feature-name": feature.Name,
"feature-label": featureLabel,
}
fieldName := chipName + ":" + featureLabel
fields := map[string]interface{}{
fieldName: feature.GetValue(),
}
acc.AddFields("sensors", fields, tags)
}
if len(s.path) == 0 {
return errors.New("sensors not found: verify that lm-sensors package is installed and that sensors is in your PATH")
}
return s.parse(acc)
}
// parse forks the command:
// sensors -u -A
// and parses the output to add it to the telegraf.Accumulator.
func (s *Sensors) parse(acc telegraf.Accumulator) error {
tags := map[string]string{}
fields := map[string]interface{}{}
chip := ""
cmd := execCommand(s.path, "-A", "-u")
out, err := internal.CombinedOutputTimeout(cmd, time.Second*5)
if err != nil {
return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
}
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
for _, line := range lines {
if len(line) == 0 {
acc.AddFields("sensors", fields, tags)
chip = ""
tags = map[string]string{}
fields = map[string]interface{}{}
continue
}
if len(chip) == 0 {
chip = line
tags["chip"] = chip
continue
}
if !strings.HasPrefix(line, " ") {
if len(tags) > 1 {
acc.AddFields("sensors", fields, tags)
}
fields = map[string]interface{}{}
tags = map[string]string{
"chip": chip,
"feature": strings.TrimRight(snake(line), ":"),
}
} else {
splitted := strings.Split(line, ":")
fieldName := strings.TrimSpace(splitted[0])
if s.RemoveNumbers {
fieldName = numberRegp.ReplaceAllString(fieldName, "")
}
fieldValue, err := strconv.ParseFloat(strings.TrimSpace(splitted[1]), 64)
if err != nil {
return err
}
fields[fieldName] = fieldValue
}
}
acc.AddFields("sensors", fields, tags)
return nil
}
func init() {
s := Sensors{
RemoveNumbers: true,
}
path, _ := exec.LookPath("sensors")
if len(path) > 0 {
s.path = path
}
inputs.Add("sensors", func() telegraf.Input {
return &Sensors{}
return &s
})
}
// snake converts string to snake case
func snake(input string) string {
return strings.ToLower(strings.Replace(input, " ", "_", -1))
}
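For reference, a tiny standalone illustration of how feature header lines from `sensors -A -u` become the `feature` tag, using the same snake helper and trailing-colon trim shown above (the sample lines come from the test mock data):

```go
package main

import (
	"fmt"
	"strings"
)

// snake mirrors the helper in sensors.go: lower-case plus underscores.
func snake(input string) string {
	return strings.ToLower(strings.Replace(input, " ", "_", -1))
}

func main() {
	// Feature headers become the "feature" tag: the label is snake-cased
	// and the trailing colon is trimmed.
	for _, line := range []string{"Physical id 0:", "Core 1:", "temp1:"} {
		fmt.Println(strings.TrimRight(snake(line), ":"))
	}
	// physical_id_0
	// core_1
	// temp1
}
```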

View File

@ -1,3 +0,0 @@
// +build !linux !sensors
package sensors

View File

@ -0,0 +1,3 @@
// +build !linux
package sensors

View File

@ -0,0 +1,328 @@
// +build linux
package sensors
import (
"fmt"
"os"
"os/exec"
"testing"
"github.com/influxdata/telegraf/testutil"
)
func TestGatherDefault(t *testing.T) {
s := Sensors{
RemoveNumbers: true,
path: "sensors",
}
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := s.Gather(&acc)
if err != nil {
t.Fatal(err)
}
var tests = []struct {
tags map[string]string
fields map[string]interface{}
}{
{
map[string]string{
"chip": "acpitz-virtual-0",
"feature": "temp1",
},
map[string]interface{}{
"temp_input": 8.3,
"temp_crit": 31.3,
},
},
{
map[string]string{
"chip": "power_meter-acpi-0",
"feature": "power1",
},
map[string]interface{}{
"power_average": 0.0,
"power_average_interval": 300.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "physical_id_0",
},
map[string]interface{}{
"temp_input": 77.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_0",
},
map[string]interface{}{
"temp_input": 75.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_1",
},
map[string]interface{}{
"temp_input": 77.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "physical_id_1",
},
map[string]interface{}{
"temp_input": 70.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_0",
},
map[string]interface{}{
"temp_input": 66.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_1",
},
map[string]interface{}{
"temp_input": 70.0,
"temp_max": 82.0,
"temp_crit": 92.0,
"temp_crit_alarm": 0.0,
},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags)
}
}
func TestGatherNotRemoveNumbers(t *testing.T) {
s := Sensors{
RemoveNumbers: false,
path: "sensors",
}
// overwriting exec commands with mock commands
execCommand = fakeExecCommand
defer func() { execCommand = exec.Command }()
var acc testutil.Accumulator
err := s.Gather(&acc)
if err != nil {
t.Fatal(err)
}
var tests = []struct {
tags map[string]string
fields map[string]interface{}
}{
{
map[string]string{
"chip": "acpitz-virtual-0",
"feature": "temp1",
},
map[string]interface{}{
"temp1_input": 8.3,
"temp1_crit": 31.3,
},
},
{
map[string]string{
"chip": "power_meter-acpi-0",
"feature": "power1",
},
map[string]interface{}{
"power1_average": 0.0,
"power1_average_interval": 300.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "physical_id_0",
},
map[string]interface{}{
"temp1_input": 77.0,
"temp1_max": 82.0,
"temp1_crit": 92.0,
"temp1_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_0",
},
map[string]interface{}{
"temp2_input": 75.0,
"temp2_max": 82.0,
"temp2_crit": 92.0,
"temp2_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0000",
"feature": "core_1",
},
map[string]interface{}{
"temp3_input": 77.0,
"temp3_max": 82.0,
"temp3_crit": 92.0,
"temp3_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "physical_id_1",
},
map[string]interface{}{
"temp1_input": 70.0,
"temp1_max": 82.0,
"temp1_crit": 92.0,
"temp1_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_0",
},
map[string]interface{}{
"temp2_input": 66.0,
"temp2_max": 82.0,
"temp2_crit": 92.0,
"temp2_crit_alarm": 0.0,
},
},
{
map[string]string{
"chip": "coretemp-isa-0001",
"feature": "core_1",
},
map[string]interface{}{
"temp3_input": 70.0,
"temp3_max": 82.0,
"temp3_crit": 92.0,
"temp3_crit_alarm": 0.0,
},
},
}
for _, test := range tests {
acc.AssertContainsTaggedFields(t, "sensors", test.fields, test.tags)
}
}
// fakeExecCommand is a helper function that mocks
// the exec.Command call (and calls the test binary)
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
// TestHelperProcess isn't a real test. It's used to mock exec.Command.
// For example, if you run:
// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sensors -A -u
// it returns the mockData below.
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
mockData := `acpitz-virtual-0
temp1:
temp1_input: 8.300
temp1_crit: 31.300
power_meter-acpi-0
power1:
power1_average: 0.000
power1_average_interval: 300.000
coretemp-isa-0000
Physical id 0:
temp1_input: 77.000
temp1_max: 82.000
temp1_crit: 92.000
temp1_crit_alarm: 0.000
Core 0:
temp2_input: 75.000
temp2_max: 82.000
temp2_crit: 92.000
temp2_crit_alarm: 0.000
Core 1:
temp3_input: 77.000
temp3_max: 82.000
temp3_crit: 92.000
temp3_crit_alarm: 0.000
coretemp-isa-0001
Physical id 1:
temp1_input: 70.000
temp1_max: 82.000
temp1_crit: 92.000
temp1_crit_alarm: 0.000
Core 0:
temp2_input: 66.000
temp2_max: 82.000
temp2_crit: 92.000
temp2_crit_alarm: 0.000
Core 1:
temp3_input: 70.000
temp3_max: 82.000
temp3_crit: 92.000
temp3_crit_alarm: 0.000
`
args := os.Args
// The preceding arguments are test flags, which look like:
// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
cmd, args := args[3], args[4:]
if cmd == "sensors" {
fmt.Fprint(os.Stdout, mockData)
} else {
fmt.Fprint(os.Stdout, "command not found")
os.Exit(1)
}
os.Exit(0)
}

View File

@ -272,6 +272,9 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
&bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
if ret == win.PDH_MORE_DATA {
filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
if len(filledBuf) == 0 {
continue
}
ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle,
&bufSize, &bufCount, &filledBuf[0])
for i := 0; i < int(bufCount); i++ {
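The added length check is what prevents the "index out of range" panic fixed here: when PDH reports zero items, taking the address of the first buffer element would panic, so the instance is skipped instead. A generic standalone sketch of the guard (getCount and fill are stand-ins, not real PDH calls):

```go
package main

import "fmt"

// queryAll sketches the "call twice" pattern used with PDH counter arrays:
// the first call reports how many items are available, the second fills a
// buffer of that size. If the reported count is zero, taking &buf[0] would
// panic, so the collection is skipped for this interval.
func queryAll(getCount func() int, fill func(buf []float64)) []float64 {
	buf := make([]float64, getCount())
	if len(buf) == 0 {
		return nil // nothing to collect; avoid indexing an empty buffer
	}
	fill(buf)
	return buf
}

func main() {
	vals := queryAll(func() int { return 0 }, func(buf []float64) {})
	fmt.Println(vals == nil) // true: empty result handled without panicking
}
```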

View File

@ -2,6 +2,42 @@
This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP.
### Configuration:
```toml
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
## The target database for metrics (telegraf will create it if not exists).
database = "telegraf" # required
## Retention policy to write to. Empty string writes to the default rp.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
### Required parameters:
* `urls`: List of strings, this is for InfluxDB clustering
@ -12,16 +48,14 @@ to write to. Each URL should start with either `http://` or `udp://`
### Optional parameters:
* `write_consistency`: Write consistency (clusters only), can be: "any", "one", "quorum", "all".
* `retention_policy`: Retention policy to write to.
* `precision`: Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". note: using "s" precision greatly improves InfluxDB compression.
* `timeout`: Write timeout (for the InfluxDB client), formatted as a string. If not provided, will default to 5s. 0s means no timeout (not recommended).
* `username`: Username for influxdb
* `password`: Password for influxdb
* `user_agent`: Set the user agent for HTTP POSTs (can be useful for log differentiation)
* `udp_payload`: Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
## Optional SSL Config
* `ssl_ca`: SSL CA
* `ssl_cert`: SSL CERT
* `ssl_key`: SSL key
* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false)
* `write_consistency`: Write consistency for clusters only, can be: "any", "one", "quorum", "all"

View File

@ -7,6 +7,7 @@ import (
"io/ioutil"
"log"
"net/http"
"regexp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
@ -14,19 +15,22 @@ import (
"github.com/influxdata/telegraf/plugins/serializers/graphite"
)
// Librato structure for configuration and client
type Librato struct {
ApiUser string
ApiToken string
Debug bool
NameFromTags bool
SourceTag string
Timeout internal.Duration
Template string
APIUser string
APIToken string
Debug bool
SourceTag string // Deprecated, keeping for backward-compatibility
Timeout internal.Duration
Template string
apiUrl string
APIUrl string
client *http.Client
}
// https://www.librato.com/docs/kb/faq/best_practices/naming_convention_metrics_sources.html#naming-limitations-for-sources-and-metrics
var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]")
var sampleConfig = `
## Librato API Docs
## http://dev.librato.com/v1/metrics-authentication
@ -36,20 +40,21 @@ var sampleConfig = `
api_token = "my-secret-token" # required.
## Debug
# debug = false
## Tag Field to populate source attribute (optional)
## This is typically the _hostname_ from which the metric was obtained.
source_tag = "host"
## Connection timeout.
# timeout = "5s"
## Output Name Template (same as graphite buckets)
## Output source Template (same as graphite buckets)
## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
template = "host.tags.measurement.field"
## This template is used for Librato's source (not the metric's name)
template = "host"
`
// LMetrics is the default struct for Librato's API format
type LMetrics struct {
Gauges []*Gauge `json:"gauges"`
}
// Gauge is the gauge format for Librato's API format
type Gauge struct {
Name string `json:"name"`
Value float64 `json:"value"`
@ -57,17 +62,22 @@ type Gauge struct {
MeasureTime int64 `json:"measure_time"`
}
const librato_api = "https://metrics-api.librato.com/v1/metrics"
const libratoAPI = "https://metrics-api.librato.com/v1/metrics"
func NewLibrato(apiUrl string) *Librato {
// NewLibrato is the main constructor for librato output plugins
func NewLibrato(apiURL string) *Librato {
return &Librato{
apiUrl: apiUrl,
APIUrl: apiURL,
Template: "host",
}
}
// Connect is the default output plugin connection function; it makes sure it
// can connect to the endpoint
func (l *Librato) Connect() error {
if l.ApiUser == "" || l.ApiToken == "" {
return fmt.Errorf("api_user and api_token are required fields for librato output")
if l.APIUser == "" || l.APIToken == "" {
return fmt.Errorf(
"api_user and api_token are required fields for librato output")
}
l.client = &http.Client{
Timeout: l.Timeout.Duration,
@ -76,18 +86,23 @@ func (l *Librato) Connect() error {
}
func (l *Librato) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
return nil
}
lmetrics := LMetrics{}
if l.Template == "" {
l.Template = "host"
}
if l.SourceTag != "" {
l.Template = l.SourceTag
}
tempGauges := []*Gauge{}
metricCounter := 0
for _, m := range metrics {
if gauges, err := l.buildGauges(m); err == nil {
for _, gauge := range gauges {
tempGauges = append(tempGauges, gauge)
metricCounter++
if l.Debug {
log.Printf("[DEBUG] Got a gauge: %v\n", gauge)
}
@ -100,81 +115,115 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
}
}
lmetrics.Gauges = make([]*Gauge, metricCounter)
copy(lmetrics.Gauges, tempGauges[0:])
metricsBytes, err := json.Marshal(lmetrics)
if err != nil {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
} else {
metricCounter := len(tempGauges)
// make sure we send batches of at most 300 gauges (see the sketch below)
sizeBatch := 300
for start := 0; start < metricCounter; start += sizeBatch {
lmetrics := LMetrics{}
end := start + sizeBatch
if end > metricCounter {
end = metricCounter
sizeBatch = end - start
}
lmetrics.Gauges = make([]*Gauge, sizeBatch)
copy(lmetrics.Gauges, tempGauges[start:end])
metricsBytes, err := json.Marshal(lmetrics)
if err != nil {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
}
if l.Debug {
log.Printf("[DEBUG] Librato request: %v\n", string(metricsBytes))
}
}
req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
if err != nil {
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
}
req.Header.Add("Content-Type", "application/json")
req.SetBasicAuth(l.ApiUser, l.ApiToken)
resp, err := l.client.Do(req)
if err != nil {
if l.Debug {
log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error())
req, err := http.NewRequest(
"POST",
l.APIUrl,
bytes.NewBuffer(metricsBytes))
if err != nil {
return fmt.Errorf(
"unable to create http.Request, %s\n",
err.Error())
}
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
} else {
if l.Debug {
req.Header.Add("Content-Type", "application/json")
req.SetBasicAuth(l.APIUser, l.APIToken)
resp, err := l.client.Do(req)
if err != nil {
if l.Debug {
log.Printf("[DEBUG] Error POSTing metrics: %v\n", err.Error())
}
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
}
defer resp.Body.Close()
if resp.StatusCode != 200 || l.Debug {
htmlData, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("[DEBUG] Couldn't get response! (%v)\n", err)
} else {
}
if resp.StatusCode != 200 {
return fmt.Errorf(
"received bad status code, %d\n %s",
resp.StatusCode,
string(htmlData))
}
if l.Debug {
log.Printf("[DEBUG] Librato response: %v\n", string(htmlData))
}
}
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
}
return nil
}
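For clarity, the batching introduced in Write above amounts to plain slice chunking: gauges are posted in groups of at most 300, with the last group holding the remainder. A standalone sketch of that loop (illustrative only, not the plugin's code):

```go
package main

import "fmt"

// batches returns the chunk sizes produced when splitting n gauges into
// groups of at most size, the same way Write slices tempGauges per POST.
func batches(n, size int) []int {
	var out []int
	for start := 0; start < n; start += size {
		end := start + size
		if end > n {
			end = n
		}
		out = append(out, end-start)
	}
	return out
}

func main() {
	fmt.Println(batches(650, 300)) // [300 300 50] -> three POSTs for 650 gauges
}
```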
// SampleConfig returns the default configuration for this output
func (l *Librato) SampleConfig() string {
return sampleConfig
}
// Description returns the description of this output
func (l *Librato) Description() string {
return "Configuration for Librato API to send metrics to."
}
func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
gauges := []*Gauge{}
bucket := graphite.SerializeBucketName(m.Name(), m.Tags(), l.Template, "")
if m.Time().Unix() == 0 {
return gauges, fmt.Errorf(
"Measure time must not be zero\n <%s> \n",
m.String())
}
metricSource := graphite.InsertField(
graphite.SerializeBucketName("", m.Tags(), l.Template, ""),
"value")
if metricSource == "" {
return gauges,
fmt.Errorf("undeterminable Source type from Field, %s\n",
l.Template)
}
for fieldName, value := range m.Fields() {
metricName := m.Name()
if fieldName != "value" {
metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName)
}
gauge := &Gauge{
Name: graphite.InsertField(bucket, fieldName),
Source: reUnacceptedChar.ReplaceAllString(metricSource, "-"),
Name: reUnacceptedChar.ReplaceAllString(metricName, "-"),
MeasureTime: m.Time().Unix(),
}
if !gauge.verifyValue(value) {
if !verifyValue(value) {
continue
}
if err := gauge.setValue(value); err != nil {
return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
return gauges, fmt.Errorf(
"unable to extract value from Fields, %s\n",
err.Error())
}
if l.SourceTag != "" {
if source, ok := m.Tags()[l.SourceTag]; ok {
gauge.Source = source
} else {
return gauges,
fmt.Errorf("undeterminable Source type from Field, %s\n",
l.SourceTag)
}
}
gauges = append(gauges, gauge)
}
if l.Debug {
@ -183,7 +232,7 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
return gauges, nil
}
func (g *Gauge) verifyValue(v interface{}) bool {
func verifyValue(v interface{}) bool {
switch v.(type) {
case string:
return false
@ -209,12 +258,13 @@ func (g *Gauge) setValue(v interface{}) error {
return nil
}
// Close closes the connection to the Librato output
func (l *Librato) Close() error {
return nil
}
func init() {
outputs.Add("librato", func() telegraf.Output {
return NewLibrato(librato_api)
return NewLibrato(libratoAPI)
})
}

View File

@ -1,7 +1,6 @@
package librato
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
@ -10,141 +9,137 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers/graphite"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/require"
)
var (
fakeUrl = "http://test.librato.com"
fakeURL = "http://test.librato.com"
fakeUser = "telegraf@influxdb.com"
fakeToken = "123456"
)
func fakeLibrato() *Librato {
l := NewLibrato(fakeUrl)
l.ApiUser = fakeUser
l.ApiToken = fakeToken
l := NewLibrato(fakeURL)
l.APIUser = fakeUser
l.APIToken = fakeToken
return l
}
func BuildTags(t *testing.T) {
testMetric := testutil.TestMetric(0.0, "test1")
graphiteSerializer := graphite.GraphiteSerializer{}
tags, err := graphiteSerializer.Serialize(testMetric)
fmt.Printf("Tags: %v", tags)
require.NoError(t, err)
}
func TestUriOverride(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com"
l.ApiToken = "123456"
l.APIUser = "telegraf@influxdb.com"
l.APIToken = "123456"
err := l.Connect()
require.NoError(t, err)
err = l.Write(testutil.MockMetrics())
err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")})
require.NoError(t, err)
}
func TestBadStatusCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
json.NewEncoder(w).Encode(`{
"errors": {
"system": [
"The API is currently down for maintenance. It'll be back shortly."
]
}
}`)
}))
ts := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
}))
defer ts.Close()
l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com"
l.ApiToken = "123456"
l.APIUser = "telegraf@influxdb.com"
l.APIToken = "123456"
err := l.Connect()
require.NoError(t, err)
err = l.Write(testutil.MockMetrics())
err = l.Write([]telegraf.Metric{newHostMetric(int32(0), "name", "host")})
if err == nil {
t.Errorf("error expected but none returned")
} else {
require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error())
require.EqualError(
t,
fmt.Errorf("received bad status code, 503\n "), err.Error())
}
}
func TestBuildGauge(t *testing.T) {
mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()
var gaugeTests = []struct {
ptIn telegraf.Metric
outGauge *Gauge
err error
}{
{
testutil.TestMetric(0.0, "test1"),
newHostMetric(0.0, "test1", "host1"),
&Gauge{
Name: "value1.test1",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test1",
MeasureTime: mtime,
Value: 0.0,
Source: "host1",
},
nil,
},
{
testutil.TestMetric(1.0, "test2"),
newHostMetric(1.0, "test2", "host2"),
&Gauge{
Name: "value1.test2",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test2",
MeasureTime: mtime,
Value: 1.0,
Source: "host2",
},
nil,
},
{
testutil.TestMetric(10, "test3"),
newHostMetric(10, "test3", "host3"),
&Gauge{
Name: "value1.test3",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test3",
MeasureTime: mtime,
Value: 10.0,
Source: "host3",
},
nil,
},
{
testutil.TestMetric(int32(112345), "test4"),
newHostMetric(int32(112345), "test4", "host4"),
&Gauge{
Name: "value1.test4",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test4",
MeasureTime: mtime,
Value: 112345.0,
Source: "host4",
},
nil,
},
{
testutil.TestMetric(int64(112345), "test5"),
newHostMetric(int64(112345), "test5", "host5"),
&Gauge{
Name: "value1.test5",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test5",
MeasureTime: mtime,
Value: 112345.0,
Source: "host5",
},
nil,
},
{
testutil.TestMetric(float32(11234.5), "test6"),
newHostMetric(float32(11234.5), "test6", "host6"),
&Gauge{
Name: "value1.test6",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test6",
MeasureTime: mtime,
Value: 11234.5,
Source: "host6",
},
nil,
},
{
testutil.TestMetric("11234.5", "test7"),
newHostMetric("11234.5", "test7", "host7"),
nil,
nil,
},
}
l := NewLibrato(fakeUrl)
l := NewLibrato(fakeURL)
for _, gt := range gaugeTests {
gauges, err := l.buildGauges(gt.ptIn)
if err != nil && gt.err == nil {
@ -167,61 +162,121 @@ func TestBuildGauge(t *testing.T) {
}
}
func newHostMetric(value interface{}, name, host string) (metric telegraf.Metric) {
metric, _ = telegraf.NewMetric(
name,
map[string]string{"host": host},
map[string]interface{}{"value": value},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
)
return
}
func TestBuildGaugeWithSource(t *testing.T) {
mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
pt1, _ := telegraf.NewMetric(
"test1",
map[string]string{"hostname": "192.168.0.1", "tag1": "value1"},
map[string]interface{}{"value": 0.0},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
mtime,
)
pt2, _ := telegraf.NewMetric(
"test2",
map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"},
map[string]interface{}{"value": 1.0},
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
mtime,
)
pt3, _ := telegraf.NewMetric(
"test3",
map[string]string{
"hostname": "192.168.0.1",
"tag2": "value2",
"tag1": "value1"},
map[string]interface{}{"value": 1.0},
mtime,
)
pt4, _ := telegraf.NewMetric(
"test4",
map[string]string{
"hostname": "192.168.0.1",
"tag2": "value2",
"tag1": "value1"},
map[string]interface{}{"value": 1.0},
mtime,
)
var gaugeTests = []struct {
ptIn telegraf.Metric
template string
outGauge *Gauge
err error
}{
{
pt1,
"hostname",
&Gauge{
Name: "192_168_0_1.value1.test1",
MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test1",
MeasureTime: mtime.Unix(),
Value: 0.0,
Source: "192.168.0.1",
Source: "192_168_0_1",
},
nil,
},
{
pt2,
"hostname",
&Gauge{
Name: "192_168_0_1.value1.test1",
MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
Name: "test2",
MeasureTime: mtime.Unix(),
Value: 1.0,
},
fmt.Errorf("undeterminable Source type from Field, hostname"),
},
{
pt3,
"tags",
&Gauge{
Name: "test3",
MeasureTime: mtime.Unix(),
Value: 1.0,
Source: "192_168_0_1.value1.value2",
},
nil,
},
{
pt4,
"hostname.tag2",
&Gauge{
Name: "test4",
MeasureTime: mtime.Unix(),
Value: 1.0,
Source: "192_168_0_1.value2",
},
nil,
},
}
l := NewLibrato(fakeUrl)
l.SourceTag = "hostname"
l := NewLibrato(fakeURL)
for _, gt := range gaugeTests {
l.Template = gt.template
gauges, err := l.buildGauges(gt.ptIn)
if err != nil && gt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
}
if gt.err != nil && err == nil {
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
t.Errorf(
"%s: expected an error (%s) but none returned",
gt.ptIn.Name(),
gt.err.Error())
}
if len(gauges) == 0 {
continue
}
if gt.err == nil && !reflect.DeepEqual(gauges[0], gt.outGauge) {
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauges[0])
t.Errorf(
"%s: \nexpected %+v\ngot %+v\n",
gt.ptIn.Name(),
gt.outGauge, gauges[0])
}
}
}