Compare commits


12 Commits

Author SHA1 Message Date
Cameron Sparr
95bad9e55b OpenTSDB filter types for HTTP AND telnet 2017-01-13 11:44:28 +00:00
Cameron Sparr
e812a2efc6 Accept an HTTP request body without newline at end (#2266)
I don't like this behavior, but it's what InfluxDB accepts, so the
telegraf listener should be consistent with that.

I accidentally reverted this behavior when I refactored the telegraf
metric representation earlier in this release cycle.
2017-01-13 11:43:50 +00:00
Cameron Sparr
411853fc74 update etc/telegraf.conf 2017-01-12 11:14:12 +00:00
Patrick Hemmer
b7d29ca0e9 allow changing jolokia delimiter (#2255) 2017-01-12 11:08:22 +00:00
Mohammad Ali Alfarra
947e1909ff Document basic auth for haproxy (#2258)
* Document basic auth for haproxy

* Typo in haproxy readme
2017-01-12 08:47:01 +00:00
Cameron Sparr
31a4f03031 mongodb: Remove superfluous ReplSet log message
closes #2248
2017-01-11 17:50:01 +00:00
Emil Haugbergsmyr
81f95e7a29 Fixes change in Kafka consumer input plugin (#2222)
* Fixes change to the error api in the kafka project.

* Updated test to reflect the change.

* Update kafka to match master branch.
2017-01-11 16:24:09 +00:00
Kurt Mackey
2aa2c796e5 Fix for broken librato output (#2225)
* Fix for broken librato output

These errors are delightful, but I'd rather avoid them:

```
Error parsing /etc/telegraf/telegraf.conf, line 2: field corresponding to `api_user' is not defined in `*librato.Librato'
```

* Fixed bad format from last commit
2017-01-09 14:48:32 +00:00
Patrick Hemmer
a658e6c509 ensure proper context on snmp error messages (#2220) 2017-01-09 13:03:33 +00:00
Jérôme Vizcaino
5f6766f6e1 ceph: sample config should reflect actual defaults (#2228) 2017-01-09 12:51:15 +00:00
Cameron Sparr
7279018cfe readme fixup & test output fixup 2017-01-09 12:28:13 +00:00
Cameron Sparr
4b08d127e0 mongodb: don't print unnecessary & inaccurate auth failure
closes #2209
2017-01-06 13:11:24 +01:00
19 changed files with 194 additions and 61 deletions

View File

@@ -44,6 +44,7 @@ plugins, not just statsd.
- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
+- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter
### Bugfixes
@@ -66,6 +67,7 @@ plugins, not just statsd.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
+- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages
## v1.1.2 [2016-12-12]

Godeps
View File

@@ -52,7 +52,7 @@ github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
-github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
+github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363

View File

@@ -140,8 +140,6 @@
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
-# ## InfluxDB precision
-# # precision = "s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -190,6 +188,11 @@
# # timeout = "5s"
+# # Send metrics to nowhere at all
+# [[outputs.discard]]
+# # no configuration
# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
@@ -219,7 +222,7 @@
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
-# ## Udp endpoint for your graylog instance.
+# ## UDP endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
@@ -312,9 +315,13 @@
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
-# ## format of the Data payload in the kinesis PutRecord, supported
-# ## String and Custom.
-# format = "string"
#
+# ## Data format to output.
+# ## Each data format has it's own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
#
# ## debug will show upstream aws messages.
# debug = false
@@ -351,6 +358,9 @@
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
+# ## client ID, if not set a random ID is generated
+# # client_id = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -428,6 +438,9 @@
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"
#
+# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
+# # expiration_interval = "60s"
# # Configuration for the Riemann server to send metrics to
@@ -538,6 +551,19 @@
# ## An array of Apache status URI to gather stats.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
+# ## user credentials for basic HTTP authentication
+# username = "myuser"
+# password = "mypassword"
+#
+# ## Timeout to the complete conection and reponse time in seconds
+# response_timeout = "25s" ## default to 5 seconds
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
# # Read metrics of bcache from stats_total and dirty_data
@@ -640,6 +666,13 @@
# #profile = ""
# #shared_credential_file = ""
#
+# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Cloudwatch API
+# # and will not be collected by Telegraf.
+# #
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = "5m"
#
@@ -789,13 +822,13 @@
# ## of the cluster.
# local = true
#
-# ## set cluster_health to true when you want to also obtain cluster health stats
+# ## Set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
-# ## Set cluster_stats to true when you want to obtain cluster stats from the
+# ## Set cluster_stats to true when you want to also obtain cluster stats from the
# ## Master node.
# cluster_stats = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -980,6 +1013,12 @@
# timeout = "5s"
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
# ## specify servers via a url matching:
@@ -993,8 +1032,9 @@
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia"
# context = "/jolokia/"
#
# ## This specifies the mode used
# # mode = "proxy"
@@ -1006,6 +1046,15 @@
# # host = "127.0.0.1"
# # port = "8080"
#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
@@ -1144,8 +1193,8 @@
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
-# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
-# ## db_user@tcp(127.0.0.1:3306)/?tls=false
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
# #
# ## If no servers are specified, then localhost is used as the host.
# servers = ["tcp(127.0.0.1:3306)/"]
@@ -1206,18 +1255,24 @@
# # TCP or UDP 'ping' given url and collect response time in seconds
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
# protocol = "tcp"
# ## Server address (default localhost)
# address = "github.com:80"
# address = "localhost:80"
# ## Set timeout
# timeout = "1s"
#
-# ## Optional string sent to the server
-# # send = "ssh"
-# ## Optional expected string in answer
-# # expect = "ssh"
# ## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
# # Read TCP metrics such as established, time wait and sockets counts.
@@ -1419,6 +1474,8 @@
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
+# ## This is optional; moves pid into a tag instead of a field
+# pid_tag = false
# # Read metrics from one or many prometheus clients
@@ -1429,6 +1486,9 @@
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
@@ -1457,6 +1517,16 @@
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Optional request timeouts
+# ##
+# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
@@ -1879,14 +1949,19 @@
# [[inputs.statsd]]
# ## Address and port to host UDP listener on
# service_address = ":8125"
-# ## Delete gauges every interval (default=false)
-# delete_gauges = false
-# ## Delete counters every interval (default=false)
-# delete_counters = false
-# ## Delete sets every interval (default=false)
-# delete_sets = false
-# ## Delete timings & histograms every interval (default=true)
#
+# ## The following configuration options control when telegraf clears it's cache
+# ## of previous values. If set to false, then telegraf will only clear it's
+# ## cache when the daemon is restarted.
+# ## Reset gauges every interval (default=true)
+# delete_gauges = true
+# ## Reset counters every interval (default=true)
+# delete_counters = true
+# ## Reset sets every interval (default=true)
+# delete_sets = true
+# ## Reset timings & histograms every interval (default=true)
# delete_timings = true
#
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
@@ -1927,6 +2002,8 @@
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
+# ## Whether file is a named pipe
+# pipe = false
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
@@ -1963,6 +2040,10 @@
# ## UDP listener will start dropping packets.
# # allowed_pending_messages = 10000
#
+# ## Set the buffer size of the UDP connection outside of OS default (in bytes)
+# ## If set to 0, take OS default
+# udp_buffer_size = 16777216
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
@@ -1986,3 +2067,4 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"

View File

@@ -75,7 +75,7 @@ func (r *RunningInput) MakeMetric(
)
if r.trace && m != nil {
fmt.Println("> " + m.String())
fmt.Print("> " + m.String())
}
r.MetricsGathered.Incr(1)

View File

@@ -82,7 +82,7 @@ the cluster. The currently supported commands are:
## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
-gather_cluster_stats = true
+gather_cluster_stats = false
```
### Measurements & Fields:

View File

@@ -68,7 +68,7 @@ var sampleConfig = `
gather_admin_socket_stats = true
## Whether to gather statistics via ceph commands
-gather_cluster_stats = true
+gather_cluster_stats = false
`
func (c *Ceph) SampleConfig() string {

View File

@@ -12,6 +12,8 @@
Server addresses need to explicitly start with 'http' if you wish to use HAproxy status page. Otherwise, address will be assumed to be an UNIX socket and protocol (if present) will be discarded.
+For basic authentication you need to add username and password in the URL: `http://user:password@1.2.3.4/haproxy?stats`.
Following examples will all resolve to the same socket:
```
socket:/var/run/haproxy.sock

View File

@@ -300,6 +300,9 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
}
func (h *HTTPListener) parse(b []byte, t time.Time) error {
+if !bytes.HasSuffix(b, []byte("\n")) {
+b = append(b, '\n')
+}
metrics, err := h.parser.ParseWithDefaultTime(b, t)
for _, m := range metrics {
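The three added lines above are the entire fix: the line-protocol parser expects the body to end in a newline, so the listener now normalizes the request body before parsing. A standalone sketch of the same guard (ensureTrailingNewline is an illustrative name, not a function in the plugin):

```
package main

import (
	"bytes"
	"fmt"
)

// ensureTrailingNewline mirrors the guard added to HTTPListener.parse:
// the line-protocol parser expects the body to end in '\n', so a request
// body posted without one is normalized before parsing.
func ensureTrailingNewline(b []byte) []byte {
	if !bytes.HasSuffix(b, []byte("\n")) {
		b = append(b, '\n')
	}
	return b
}

func main() {
	body := []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257")
	fmt.Printf("%q\n", ensureTrailingNewline(body))
}
```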

View File

@@ -16,6 +16,8 @@ import (
const (
testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
+testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
cpu_load_short,host=server03 value=12.0 1422568543702900257
cpu_load_short,host=server04 value=12.0 1422568543702900257
@@ -81,6 +83,28 @@ func TestWriteHTTP(t *testing.T) {
)
}
+// http listener should add a newline at the end of the buffer if it's not there
+func TestWriteHTTPNoNewline(t *testing.T) {
+listener := newTestHTTPListener()
+acc := &testutil.Accumulator{}
+require.NoError(t, listener.Start(acc))
+defer listener.Stop()
+time.Sleep(time.Millisecond * 25)
+// post single message to listener
+resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline)))
+require.NoError(t, err)
+require.EqualValues(t, 204, resp.StatusCode)
+time.Sleep(time.Millisecond * 15)
+acc.AssertContainsTaggedFields(t, "cpu_load_short",
+map[string]interface{}{"value": float64(12)},
+map[string]string{"host": "server01"},
+)
+}
func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
listener := &HTTPListener{
ServiceAddress: ":8296",

View File

@@ -47,12 +47,13 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error
}
type Jolokia struct {
-	jClient JolokiaClient
-	Context string
-	Mode    string
-	Servers []Server
-	Metrics []Metric
-	Proxy   Server
+	jClient   JolokiaClient
+	Context   string
+	Mode      string
+	Servers   []Server
+	Metrics   []Metric
+	Proxy     Server
+	Delimiter string
ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"`
ClientTimeout internal.Duration `toml:"client_timeout"`
@@ -84,6 +85,13 @@ const sampleConfig = `
## Includes connection time, any redirects, and reading the response body.
# client_timeout = "4s"
+## Attribute delimiter
+##
+## When multiple attributes are returned for a single
+## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+## name, and the attribute name, separated by the given delimiter.
+# delimiter = "_"
## List of servers exposing jolokia read service
[[inputs.jolokia.servers]]
name = "as-server-01"
@@ -238,10 +246,10 @@ func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, e
return req, nil
}
-func extractValues(measurement string, value interface{}, fields map[string]interface{}) {
+func (j *Jolokia) extractValues(measurement string, value interface{}, fields map[string]interface{}) {
if mapValues, ok := value.(map[string]interface{}); ok {
for k2, v2 := range mapValues {
-extractValues(measurement+"_"+k2, v2, fields)
+j.extractValues(measurement+j.Delimiter+k2, v2, fields)
}
} else {
fields[measurement] = value
@@ -282,7 +290,7 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
fmt.Printf("Error handling response: %s\n", err)
} else {
if values, ok := out["value"]; ok {
-extractValues(measurement, values, fields)
+j.extractValues(measurement, values, fields)
} else {
fmt.Printf("Missing key 'value' in output response\n")
}
@@ -301,6 +309,7 @@ func init() {
return &Jolokia{
ResponseHeaderTimeout: DefaultResponseHeaderTimeout,
ClientTimeout: DefaultClientTimeout,
+Delimiter: "_",
}
})
}
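The extractValues change is what makes the delimiter configurable: nested attribute maps from Jolokia's JSON response are flattened into field names joined by Delimiter instead of a hard-coded "_". A self-contained sketch of that flattening (flatten is an illustrative stand-in for the method above):

```
package main

import "fmt"

// flatten stands in for Jolokia.extractValues: nested attribute maps are
// flattened into field names joined by the configurable delimiter, and
// leaf values become the field values.
func flatten(name string, value interface{}, delim string, fields map[string]interface{}) {
	if m, ok := value.(map[string]interface{}); ok {
		for k, v := range m {
			flatten(name+delim+k, v, delim, fields)
		}
	} else {
		fields[name] = value
	}
}

func main() {
	fields := map[string]interface{}{}
	resp := map[string]interface{}{
		"HeapMemoryUsage": map[string]interface{}{"used": 1024, "max": 4096},
	}
	flatten("java_memory", resp, "_", fields)
	fmt.Println(fields) // map[java_memory_HeapMemoryUsage_max:4096 java_memory_HeapMemoryUsage_used:1024]
}
```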

View File

@@ -104,9 +104,10 @@ func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia {
return &Jolokia{
-	jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
-	Servers: servers,
-	Metrics: metrics,
+	jClient:   jolokiaClientStub{responseBody: response, statusCode: statusCode},
+	Servers:   servers,
+	Metrics:   metrics,
+	Delimiter: "_",
}
}

View File

@@ -33,7 +33,7 @@ type Kafka struct {
// channel for all incoming kafka messages
in <-chan *sarama.ConsumerMessage
// channel for all kafka consumer errors
-errs <-chan *sarama.ConsumerError
+errs <-chan error
done chan struct{}
// keep the accumulator internally:

View File

@@ -27,7 +27,7 @@ func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
Offset: "oldest",
in: in,
doNotCommitMsgs: true,
-errs: make(chan *sarama.ConsumerError, 1000),
+errs: make(chan error, 1000),
done: make(chan struct{}),
}
return &k, in
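This type change follows the Godeps pin above: the new wvanbergen/kafka revision delivers consumer-group errors as plain error values rather than *sarama.ConsumerError. A minimal sketch of a receive loop over the new channel type (drainErrors is an illustrative name, not plugin code):

```
package main

import (
	"errors"
	"fmt"
)

// drainErrors sketches the plugin's receive loop after the dependency bump:
// errors now arrive as plain error values, so no *sarama.ConsumerError
// unwrapping is needed before logging.
func drainErrors(errs <-chan error) {
	for err := range errs {
		fmt.Println("E! Kafka consumer error:", err)
	}
}

func main() {
	errs := make(chan error, 1)
	errs <- errors.New("kafka: offset out of range")
	close(errs)
	drainErrors(errs)
}
```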

View File

@@ -130,7 +130,6 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
sess, err := mgo.DialWithInfo(dialInfo)
if err != nil {
fmt.Printf("error dialing over ssl, %s\n", err.Error())
return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
}
server.Session = sess

View File

@@ -40,15 +40,14 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
return err
}
result_repl := &ReplSetStatus{}
-err = s.Session.DB("admin").Run(bson.D{
+// ignore error because it simply indicates that the db is not a member
+// in a replica set, which is fine.
+_ = s.Session.DB("admin").Run(bson.D{
{
Name: "replSetGetStatus",
Value: 1,
},
}, result_repl)
-if err != nil {
-log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
-}
jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()

View File

@@ -146,13 +146,13 @@ func (s *Snmp) init() error {
for i := range s.Tables {
if err := s.Tables[i].init(); err != nil {
-return err
+return Errorf(err, "initializing table %s", s.Tables[i].Name)
}
}
for i := range s.Fields {
if err := s.Fields[i].init(); err != nil {
-return err
+return Errorf(err, "initializing field %s", s.Fields[i].Name)
}
}
@@ -192,7 +192,7 @@ func (t *Table) init() error {
// initialize all the nested fields
for i := range t.Fields {
if err := t.Fields[i].init(); err != nil {
-return err
+return Errorf(err, "initializing field %s", t.Fields[i].Name)
}
}
@@ -210,7 +210,7 @@ func (t *Table) initBuild() error {
_, _, oidText, fields, err := snmpTable(t.Oid)
if err != nil {
return Errorf(err, "initializing table %s", t.Oid)
return err
}
if t.Name == "" {
t.Name = oidText
@@ -252,7 +252,7 @@ func (f *Field) init() error {
_, oidNum, oidText, conversion, err := snmpTranslate(f.Oid)
if err != nil {
return Errorf(err, "translating %s", f.Oid)
return Errorf(err, "translating")
}
f.Oid = oidNum
if f.Name == "" {
@@ -358,7 +358,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// Now is the real tables.
for _, t := range s.Tables {
if err := s.gatherTable(acc, gs, t, topTags, true); err != nil {
acc.AddError(Errorf(err, "agent %s", agent))
acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name))
}
}
}
@@ -406,7 +406,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
}
if len(f.Oid) == 0 {
return nil, fmt.Errorf("cannot have empty OID")
return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name)
}
var oid string
if f.Oid[0] == '.' {
@@ -426,12 +426,12 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
// empty string. This results in all the non-table fields sharing the same
// index, and being added on the same row.
if pkt, err := gs.Get([]string{oid}); err != nil {
return nil, Errorf(err, "performing get")
return nil, Errorf(err, "performing get on field %s", f.Name)
} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
ent := pkt.Variables[0]
fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
return nil, Errorf(err, "converting %q", ent.Value)
return nil, Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
}
if fvs, ok := fv.(string); !ok || fvs != "" {
ifv[""] = fv
@@ -454,7 +454,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
return Errorf(err, "converting %q", ent.Value)
return Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
}
if fvs, ok := fv.(string); !ok || fvs != "" {
ifv[idx] = fv
@@ -463,7 +463,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
})
if err != nil {
if _, ok := err.(NestedError); !ok {
return nil, Errorf(err, "performing bulk walk")
return nil, Errorf(err, "performing bulk walk for field %s", f.Name)
}
}
}
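All of these messages funnel through the plugin's Errorf helper, which wraps a low-level error with the table, field, or agent it occurred on so that nested failures keep their context. A rough sketch of the wrapping pattern (NestedError and Errorf here are reconstructed from the call sites above, not copied from the plugin):

```
package main

import "fmt"

// NestedError pairs the context added at one layer with the wrapped
// lower-level error, so the final log line reads outermost context first.
type NestedError struct {
	Err       error // the context added at this layer
	NestedErr error // the wrapped lower-level error
}

func (e NestedError) Error() string {
	return e.Err.Error() + ": " + e.NestedErr.Error()
}

// Errorf wraps err with a formatted context message, matching the call
// shape used throughout the snmp plugin.
func Errorf(err error, msg string, args ...interface{}) error {
	return NestedError{Err: fmt.Errorf(msg, args...), NestedErr: err}
}

func main() {
	base := fmt.Errorf("no such object")
	err := Errorf(base, "performing get on field %s", "ifSpeed")
	fmt.Println(Errorf(err, "agent %s: gathering table %s", "10.0.0.1", "ifTable"))
	// agent 10.0.0.1: gathering table ifTable: performing get on field ifSpeed: no such object
}
```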

View File

@@ -19,6 +19,7 @@ to the unix `uptime` command.
- load15 (float)
- load5 (float)
- n_users (integer)
+- n_cpus (integer)
- uptime (integer, seconds)
- uptime_format (string)
@@ -31,5 +32,7 @@ None
```
$ telegraf -config ~/ws/telegraf.conf -input-filter system -test
-* Plugin: system, Collection 1
-> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452
+* Plugin: inputs.system, Collection 1
+> system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000
+> system,host=tyrion uptime=1249632i,uptime_format="14 days, 11:07" 1483964144000000000
```

View File

@@ -17,8 +17,8 @@ import (
// Librato structure for configuration and client
type Librato struct {
-APIUser  string
-APIToken string
+APIUser  string `toml:"api_user"`
+APIToken string `toml:"api_token"`
Debug bool
SourceTag string // Deprecated, keeping for backward-compatibility
Timeout internal.Duration
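The added toml tags are the whole fix for the config error quoted in the commit message: without them, the decoder derives keys from the Go field names, so api_user in telegraf.conf has nothing to bind to. A minimal sketch of the repaired binding, using the BurntSushi/toml decoder as a stand-in for telegraf's internal one:

```
package main

import (
	"fmt"

	"github.com/BurntSushi/toml" // stand-in decoder; telegraf uses its own toml fork
)

// With the explicit tags, the snake_case keys in telegraf.conf bind to the
// exported fields; without them, a decoder deriving keys from field names
// has no field matching "api_user", which is what produced the quoted error.
type Librato struct {
	APIUser  string `toml:"api_user"`
	APIToken string `toml:"api_token"`
}

func main() {
	var l Librato
	conf := `
api_user = "telegraf@example.com"
api_token = "secret"
`
	if _, err := toml.Decode(conf, &l); err != nil {
		panic(err)
	}
	fmt.Println(l.APIUser, l.APIToken)
}
```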

View File

@@ -157,6 +157,15 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
tags := ToLineFormat(cleanTags(m.Tags()))
for fieldName, value := range m.Fields() {
+switch value.(type) {
+case int64:
+case uint64:
+case float64:
+default:
+log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
+continue
+}
metricValue, buildError := buildValue(value)
if buildError != nil {
log.Printf("E! OpenTSDB: %s\n", buildError.Error())