Compare commits

4 Commits: 1.2.0-rc1...dgn-extern

| Author | SHA1 | Date |
|---|---|---|
| | 1b5af9f303 | |
| | 7dfbd03e61 | |
| | 696e2c17ed | |
| | f4a7f5c885 | |
CHANGELOG.md
@@ -2,19 +2,6 @@

### Release Notes

- The StatsD plugin will now default all "delete_" config options to "true". This
will change the default behavior for users who were not specifying these parameters
in their config file.

- The StatsD plugin will also no longer save its state on a service reload.
Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
The reason for this is that saving the state in a global variable is not
thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
and this creates issues if users want to define multiple instances
of the statsd plugin. Saving state on reload may be considered in the future,
but this would need to be implemented at a higher level and applied to all
plugins, not just statsd.
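Anyone who wants to keep the pre-1.2 behavior can pin these options explicitly; a minimal sketch using the statsd option names shown in the sample config further down (the values mirror the old defaults, and are illustrative only):

```toml
[[inputs.statsd]]
  ## Address and port to host UDP listener on
  service_address = ":8125"

  ## Pin the pre-1.2 behavior: only clear the cache on restart.
  delete_gauges   = false
  delete_counters = false
  delete_sets     = false
  delete_timings  = true
```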

### Features

- [#2123](https://github.com/influxdata/telegraf/pull/2123): Fix improper calculation of CPU percentages
@@ -27,47 +14,12 @@ plugins, not just statsd.
- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
- [#2026](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
- [#1885](https://github.com/influxdata/telegraf/pull/1885): Fix over-querying of cloudwatch metrics
- [#1913](https://github.com/influxdata/telegraf/pull/1913): OpenTSDB basic auth support.
- [#1908](https://github.com/influxdata/telegraf/pull/1908): RabbitMQ Connection metrics.
- [#1937](https://github.com/influxdata/telegraf/pull/1937): HAProxy session limit metric.
- [#2068](https://github.com/influxdata/telegraf/issues/2068): Accept strings for StatsD sets.
- [#1893](https://github.com/influxdata/telegraf/issues/1893): Change StatsD default "reset" behavior.
- [#2079](https://github.com/influxdata/telegraf/pull/2079): Enable setting ClientID in MQTT output.
- [#2001](https://github.com/influxdata/telegraf/pull/2001): MongoDB input plugin: Improve state data.
- [#2078](https://github.com/influxdata/telegraf/pull/2078): Ping input: add standard deviation field.
- [#2121](https://github.com/influxdata/telegraf/pull/2121): Add GC pause metric to InfluxDB input plugin.
- [#2006](https://github.com/influxdata/telegraf/pull/2006): Added response_timeout property to prometheus input plugin.
- [#1763](https://github.com/influxdata/telegraf/issues/1763): Pulling github.com/lxn/win's pdh wrapper into telegraf.
- [#1898](https://github.com/influxdata/telegraf/issues/1898): Support negative statsd counters.
- [#1921](https://github.com/influxdata/telegraf/issues/1921): Elasticsearch cluster stats support.
- [#1942](https://github.com/influxdata/telegraf/pull/1942): Change Amazon Kinesis output plugin to use the built-in serializer plugins.
- [#1980](https://github.com/influxdata/telegraf/issues/1980): Hide username/password from elasticsearch error log messages.
- [#2097](https://github.com/influxdata/telegraf/issues/2097): Configurable HTTP timeouts in Jolokia plugin
- [#2255](https://github.com/influxdata/telegraf/pull/2255): Allow changing jolokia attribute delimiter

### Bugfixes

- [#2049](https://github.com/influxdata/telegraf/pull/2049): Fix the Value data format not trimming null characters from input.
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters.
- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
- [#1693](https://github.com/influxdata/telegraf/issues/1693): Properly collect nested jolokia struct data.
- [#1917](https://github.com/influxdata/telegraf/pull/1917): fix puppetagent inputs plugin to support string for config variable.
- [#1987](https://github.com/influxdata/telegraf/issues/1987): fix docker input plugin tags when registry has port.
- [#2089](https://github.com/influxdata/telegraf/issues/2089): Fix tail input when reading from a pipe.
- [#1449](https://github.com/influxdata/telegraf/issues/1449): MongoDB plugin always shows 0 replication lag.
- [#1825](https://github.com/influxdata/telegraf/issues/1825): Consul plugin: add check_id as a tag in metrics to avoid overwrites.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages

## v1.1.2 [2016-12-12]

Godeps
@@ -52,7 +52,7 @@ github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363

@@ -1,6 +1,7 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/lxn/win 950a0e81e7678e63d8e6cd32412bdecb325ccd88
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438

@@ -5,8 +5,8 @@ machine:
- sudo service zookeeper stop
- go version
- go version | grep 1.7.4 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
- wget https://storage.googleapis.com/golang/go1.8beta1.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8beta1.linux-amd64.tar.gz
- go version

dependencies:

@@ -6,18 +6,24 @@ import (
"log"
"os"
"os/signal"
"path"
"path/filepath"
"plugin"
"runtime"
"strings"
"syscall"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/plugins/aggregators"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/influxdata/telegraf/plugins/processors"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
@@ -50,6 +56,8 @@ var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
var fPlugins = flag.String("plugins", "",
"path to directory containing external plugins")

// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
@@ -304,9 +312,93 @@ func (p *program) Stop(s service.Service) error {
return nil
}

// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory.
func loadExternalPlugins(dir string) error {
return filepath.Walk(dir, func(pth string, info os.FileInfo, err error) error {
// Stop if there was an error.
if err != nil {
return err
}

// Ignore directories.
if info.IsDir() {
return nil
}

// Ignore files that aren't shared libraries.
ext := strings.ToLower(path.Ext(pth))
if ext != ".so" && ext != ".dll" {
return nil
}

// Load plugin.
p, err := plugin.Open(pth)
if err != nil {
return err
}

// Register plugin.
if err := registerPlugin(dir, pth, p); err != nil {
return err
}

return nil
})
}

// registerPlugin registers an external plugin with telegraf.
func registerPlugin(pluginsDir, filePath string, p *plugin.Plugin) error {
// Clean the file path and make sure it's relative to the root plugins directory.
// This is done because plugin names are namespaced using the directory
// structure. E.g., if the root plugin directory, passed in the pluginsDir
// argument, is '/home/jdoe/bin/telegraf/plugins' and we're registering plugin
// '/home/jdoe/bin/telegraf/plugins/input/mysql.so'
pluginsDir = filepath.Clean(pluginsDir)
parentDir, _ := filepath.Split(pluginsDir)
var err error
if filePath, err = filepath.Rel(parentDir, filePath); err != nil {
return err
}
// Strip the file extension and save it.
ext := path.Ext(filePath)
filePath = strings.TrimSuffix(filePath, ext)
// Convert path separators to "." to generate a plugin name namespaced by directory names.
name := strings.Replace(filePath, string(os.PathSeparator), ".", -1)

if create, err := p.Lookup("NewInput"); err == nil {
inputs.Add(name, inputs.Creator(create.(func() telegraf.Input)))
} else if create, err := p.Lookup("NewOutput"); err == nil {
outputs.Add(name, outputs.Creator(create.(func() telegraf.Output)))
} else if create, err := p.Lookup("NewProcessor"); err == nil {
processors.Add(name, processors.Creator(create.(func() telegraf.Processor)))
} else if create, err := p.Lookup("NewAggregator"); err == nil {
aggregators.Add(name, aggregators.Creator(create.(func() telegraf.Aggregator)))
} else {
return fmt.Errorf("not a telegraf plugin: %s%s", filePath, ext)
}

log.Printf("I! Registered: %s (from %s%s)\n", name, filePath, ext)

return nil
}
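The symbol lookup above implies that an external plugin is just a Go plugin exporting a `NewInput` (or `NewOutput`/`NewProcessor`/`NewAggregator`) constructor with the exact signature the type assertion expects. A minimal sketch of what such a plugin's source might look like (the `Simple` type and field values are hypothetical; the `plugins/input/mysql.so` path echoes the comment above):

```go
// Hypothetical external input plugin; compile with:
//   go build -buildmode=plugin -o plugins/input/mysql.so
package main

import "github.com/influxdata/telegraf"

// Simple is a stand-in input; any type implementing telegraf.Input works.
type Simple struct{}

func (s *Simple) Description() string  { return "an example external input" }
func (s *Simple) SampleConfig() string { return "" }

func (s *Simple) Gather(acc telegraf.Accumulator) error {
	// Emit a single constant field so the plugin is observable in -test runs.
	acc.AddFields("example", map[string]interface{}{"value": 1}, nil)
	return nil
}

// NewInput is the symbol registerPlugin looks up; it must be exactly
// func() telegraf.Input for the type assertion above to succeed.
func NewInput() telegraf.Input { return &Simple{} }
```

With that shared library in place, starting telegraf with the `-plugins` flag added above (e.g. `telegraf -plugins /home/jdoe/bin/telegraf/plugins`) would register it under a name derived from its path relative to the plugins directory's parent, here `plugins.input.mysql`.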

func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()

// Load external plugins, if requested.
if *fPlugins != "" {
pluginsDir, err := filepath.Abs(*fPlugins)
if err != nil {
log.Fatal("E! " + err.Error())
}
log.Printf("I! Loading external plugins from: %s\n", pluginsDir)
if err := loadExternalPlugins(*fPlugins); err != nil {
log.Fatal("E! " + err.Error())
}
}

if runtime.GOOS == "windows" {
svcConfig := &service.Config{
Name: "telegraf",

@@ -140,6 +140,8 @@
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
# ## InfluxDB precision
# # precision = "s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
@@ -188,11 +190,6 @@
# # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
# # no configuration


# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
@@ -222,7 +219,7 @@

# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
# ## Udp endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


@@ -315,13 +312,9 @@
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
#
# ## Data format to output.
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
#
# ## format of the Data payload in the kinesis PutRecord, supported
# ## String and Custom.
# format = "string"
# ## debug will show upstream aws messages.
# debug = false

@@ -358,9 +351,6 @@
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## client ID, if not set a random ID is generated
# # client_id = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -438,9 +428,6 @@
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
# # expiration_interval = "60s"


# # Configuration for the Riemann server to send metrics to
@@ -551,19 +538,6 @@
# ## An array of Apache status URI to gather stats.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
# ## user credentials for basic HTTP authentication
# username = "myuser"
# password = "mypassword"
#
# ## Timeout to the complete conection and reponse time in seconds
# response_timeout = "25s" ## default to 5 seconds
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
@@ -666,13 +640,6 @@
# #profile = ""
# #shared_credential_file = ""
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# # metrics are made available to the 1 minute period. Some are collected at
# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# # Note that if a period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the Cloudwatch API
# # and will not be collected by Telegraf.
# #
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = "5m"
#
@@ -817,18 +784,13 @@
# ## Timeout for HTTP requests to the elastic search server(s)
# http_timeout = "5s"
#
# ## When local is true (the default), the node will read only its own stats.
# ## Set local to false when you want to read the node stats from all nodes
# ## of the cluster.
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
#
# ## Set cluster_health to true when you want to also obtain cluster health stats
# ## set cluster_health to true when you want to also obtain cluster level stats
# cluster_health = false
#
# ## Set cluster_stats to true when you want to also obtain cluster stats from the
# ## Master node.
# cluster_stats = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
@@ -1013,12 +975,6 @@
# timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true


# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
# ## specify servers via a url matching:
@@ -1032,9 +988,8 @@
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# ## NOTE that Jolokia requires a trailing slash at the end of the context root
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia/"
# context = "/jolokia"
#
# ## This specifies the mode used
# # mode = "proxy"
@@ -1046,15 +1001,6 @@
# # host = "127.0.0.1"
# # port = "8080"
#
# ## Optional http timeouts
# ##
# ## response_header_timeout, if non-zero, specifies the amount of time to wait
# ## for a server's response headers after fully writing the request.
# # response_header_timeout = "3s"
# ##
# ## client_timeout specifies a time limit for requests made by this client.
# ## Includes connection time, any redirects, and reading the response body.
# # client_timeout = "4s"
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
@@ -1193,8 +1139,8 @@
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
# ## db_user@tcp(127.0.0.1:3306)/?tls=false
# #
# ## If no servers are specified, then localhost is used as the host.
# servers = ["tcp(127.0.0.1:3306)/"]
@@ -1255,24 +1201,18 @@
# # TCP or UDP 'ping' given url and collect response time in seconds
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
# ## NOTE: because the "udp" protocol does not respond to requests, it requires
# ## a send/expect string pair (see below).
# protocol = "tcp"
# ## Server address (default localhost)
# address = "localhost:80"
# address = "github.com:80"
# ## Set timeout
# timeout = "1s"
#
# ## Optional string sent to the server
# # send = "ssh"
# ## Optional expected string in answer
# # expect = "ssh"
# ## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
#
# ## The following options are required for UDP checks. For TCP, they are
# ## optional. The plugin will send the given string to the server and then
# ## expect to receive the given 'expect' string back.
# ## string sent to the server
# # send = "ssh"
# ## expected string in answer
# # expect = "ssh"


# # Read TCP metrics such as established, time wait and sockets counts.
@@ -1474,8 +1414,6 @@
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# ## This is optional; moves pid into a tag instead of a field
# pid_tag = false


# # Read metrics from one or many prometheus clients
@@ -1486,9 +1424,6 @@
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Specify timeout duration for slower prometheus clients (default is 3s)
# # response_timeout = "3s"
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
@@ -1517,16 +1452,6 @@
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional request timeouts
# ##
# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
# ## for a server's response headers after fully writing the request.
# # header_timeout = "3s"
# ##
# ## client_timeout specifies a time limit for requests made by this client.
# ## Includes connection time, any redirects, and reading the response body.
# # client_timeout = "4s"
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
@@ -1949,19 +1874,14 @@
# [[inputs.statsd]]
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
# ## The following configuration options control when telegraf clears it's cache
# ## of previous values. If set to false, then telegraf will only clear it's
# ## cache when the daemon is restarted.
# ## Reset gauges every interval (default=true)
# delete_gauges = true
# ## Reset counters every interval (default=true)
# delete_counters = true
# ## Reset sets every interval (default=true)
# delete_sets = true
# ## Reset timings & histograms every interval (default=true)
# ## Delete gauges every interval (default=false)
# delete_gauges = false
# ## Delete counters every interval (default=false)
# delete_counters = false
# ## Delete sets every interval (default=false)
# delete_sets = false
# ## Delete timings & histograms every interval (default=true)
# delete_timings = true
#
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
@@ -2002,8 +1922,6 @@
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
# ## Whether file is a named pipe
# pipe = false
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
@@ -2040,10 +1958,6 @@
# ## UDP listener will start dropping packets.
# # allowed_pending_messages = 10000
#
# ## Set the buffer size of the UDP connection outside of OS default (in bytes)
# ## If set to 0, take OS default
# udp_buffer_size = 16777216
#
# ## Data format to consume.
# ## Each data format has it's own unique set of configuration options, read
# ## more about them here:
@@ -2067,4 +1981,3 @@
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"

@@ -70,9 +70,7 @@ func (r *RunningAggregator) MakeMetric(
t,
)

if m != nil {
m.SetAggregate(true)
}
m.SetAggregate(true)

return m
}

@@ -75,7 +75,7 @@ func (r *RunningInput) MakeMetric(
)

if r.trace && m != nil {
fmt.Print("> " + m.String())
fmt.Println("> " + m.String())
}

r.MetricsGathered.Incr(1)

@@ -289,6 +289,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
requestUrl.User = url.UserPassword(serverTokens["user"],
serverTokens["passwd"])
}
fmt.Printf("host %s url %s\n", serverTokens["host"], requestUrl)

out, err := c.getAttr(requestUrl)
if out["status"] != 200.0 {

@@ -82,7 +82,7 @@ the cluster. The currently supported commands are:

## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config
## to be specified
gather_cluster_stats = false
gather_cluster_stats = true
```

### Measurements & Fields:

@@ -68,7 +68,7 @@ var sampleConfig = `
gather_admin_socket_stats = true

## Whether to gather statistics via ceph commands
gather_cluster_stats = false
gather_cluster_stats = true
`

func (c *Ceph) SampleConfig() string {

@@ -126,7 +126,11 @@ func (c *CloudWatch) Description() string {
return "Pull Metric Statistics from Amazon CloudWatch"
}

func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}

var metrics []*cloudwatch.Metric

// check for provided metric filter
@@ -151,11 +155,11 @@ func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
} else {
allMetrics, err := c.fetchNamespaceMetrics()
if err != nil {
return nil, err
return err
}
for _, name := range m.MetricNames {
for _, metric := range allMetrics {
if isSelected(name, metric, m.Dimensions) {
if isSelected(metric, m.Dimensions) {
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String(c.Namespace),
MetricName: aws.String(name),
@@ -165,26 +169,16 @@ func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) {
}
}
}

}
} else {
var err error
metrics, err = c.fetchNamespaceMetrics()
if err != nil {
return nil, err
return err
}
}
return metrics, nil
}

func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
if c.client == nil {
c.initializeCloudWatch()
}

metrics, err := SelectMetrics(c)
if err != nil {
return err
}
metricCount := len(metrics)
errChan := errchan.New(metricCount)

@@ -386,10 +380,7 @@ func hasWilcard(dimensions []*Dimension) bool {
return false
}

func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool {
if name != *metric.MetricName {
return false
}
func isSelected(metric *cloudwatch.Metric, dimensions []*Dimension) bool {
if len(metric.Dimensions) != len(dimensions) {
return false
}

@@ -11,9 +11,9 @@ import (
"github.com/stretchr/testify/assert"
)

type mockGatherCloudWatchClient struct{}
type mockCloudWatchClient struct{}

func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
func (m *mockCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metric := &cloudwatch.Metric{
Namespace: params.Namespace,
MetricName: aws.String("Latency"),
@@ -31,7 +31,7 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI
return result, nil
}

func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
func (m *mockCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
dataPoint := &cloudwatch.Datapoint{
Timestamp: params.EndTime,
Minimum: aws.Float64(0.1),
@@ -62,7 +62,7 @@ func TestGather(t *testing.T) {
}

var acc testutil.Accumulator
c.client = &mockGatherCloudWatchClient{}
c.client = &mockCloudWatchClient{}

c.Gather(&acc)

@@ -83,94 +83,6 @@ func TestGather(t *testing.T) {

}

type mockSelectMetricsCloudWatchClient struct{}

func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
metrics := []*cloudwatch.Metric{}
// 4 metrics are available
metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"}
// for 3 ELBs
loadBalancers := []string{"lb-1", "lb-2", "lb-3"}
// in 2 AZs
availabilityZones := []string{"us-east-1a", "us-east-1b"}
for _, m := range metricNames {
for _, lb := range loadBalancers {
// For each metric/ELB pair, we get an aggregate value across all AZs.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
},
})
for _, az := range availabilityZones {
// We get a metric for each metric/ELB/AZ triplet.
metrics = append(metrics, &cloudwatch.Metric{
Namespace: aws.String("AWS/ELB"),
MetricName: aws.String(m),
Dimensions: []*cloudwatch.Dimension{
&cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),
Value: aws.String(lb),
},
&cloudwatch.Dimension{
Name: aws.String("AvailabilityZone"),
Value: aws.String(az),
},
},
})
}
}
}

result := &cloudwatch.ListMetricsOutput{
Metrics: metrics,
}
return result, nil
}

func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) {
return nil, nil
}

func TestSelectMetrics(t *testing.T) {
duration, _ := time.ParseDuration("1m")
internalDuration := internal.Duration{
Duration: duration,
}
c := &CloudWatch{
Region: "us-east-1",
Namespace: "AWS/ELB",
Delay: internalDuration,
Period: internalDuration,
RateLimit: 10,
Metrics: []*Metric{
&Metric{
MetricNames: []string{"Latency", "RequestCount"},
Dimensions: []*Dimension{
&Dimension{
Name: "LoadBalancerName",
Value: "*",
},
&Dimension{
Name: "AvailabilityZone",
Value: "*",
},
},
},
},
}
c.client = &mockSelectMetricsCloudWatchClient{}
metrics, err := SelectMetrics(c)
// We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2
// AZs. We should get 12 metrics.
assert.Equal(t, 12, len(metrics))
assert.Nil(t, err)
}

func TestGenerateStatisticsInputParams(t *testing.T) {
d := &cloudwatch.Dimension{
Name: aws.String("LoadBalancerName"),

@@ -29,9 +29,9 @@ to query the data. It will not report the [telemetry](https://www.consul.io/docs
Tags:
- node: on which node check/service is registered on
- service_name: name of the service (this is the service name not the service ID)
- check_id

Fields:
- check_id
- check_name
- service_id
- status
@@ -41,6 +41,6 @@ Fields:

```
$ telegraf --config ./telegraf.conf -input-filter consul -test
* Plugin: consul, Collection 1
> consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
> consul_health_checks,host=wolfpit,node=consul-server-node check_id="serfHealth",check_name="Serf Health Status",service_id="",status="passing" 1464698464486439902
> consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com check_id="service:www-example-com.test01",check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical" 1464698464486519036
```

@@ -95,13 +95,13 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt
record := make(map[string]interface{})
tags := make(map[string]string)

record["check_id"] = check.CheckID
record["check_name"] = check.Name
record["service_id"] = check.ServiceID
record["status"] = check.Status

tags["node"] = check.Node
tags["service_name"] = check.ServiceName
tags["check_id"] = check.CheckID

acc.AddFields("consul_health_checks", record, tags)
}

@@ -22,6 +22,7 @@ var sampleChecks = []*api.HealthCheck{

func TestGatherHealtCheck(t *testing.T) {
expectedFields := map[string]interface{}{
"check_id": "foo.health123",
"check_name": "foo.health",
"status": "passing",
"service_id": "foo.123",
@@ -30,7 +31,6 @@ func TestGatherHealtCheck(t *testing.T) {
expectedTags := map[string]string{
"node": "localhost",
"service_name": "foo",
"check_id": "foo.health123",
}

var acc testutil.Accumulator

@@ -221,18 +221,14 @@ func (d *Docker) gatherContainer(
cname = strings.TrimPrefix(container.Names[0], "/")
}

// the image name sometimes has a version part, or a private repo
// ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management
imageName := ""
// the image name sometimes has a version part.
// ie, rabbitmq:3-management
imageParts := strings.Split(container.Image, ":")
imageName := imageParts[0]
imageVersion := "unknown"
i := strings.LastIndex(container.Image, ":") // index of last ':' character
if i > -1 {
imageVersion = container.Image[i+1:]
imageName = container.Image[:i]
} else {
imageName = container.Image
if len(imageParts) > 1 {
imageVersion = imageParts[1]
}

tags := map[string]string{
"engine_host": d.engine_host,
"container_name": cname,
@@ -368,22 +364,11 @@ func gatherContainerStats(
if field == "container_id" {
continue
}

var uintV uint64
switch v := value.(type) {
case uint64:
uintV = v
case int64:
uintV = uint64(v)
default:
continue
}

_, ok := totalNetworkStatMap[field]
if ok {
totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV
totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + value.(uint64)
} else {
totalNetworkStatMap[field] = uintV
totalNetworkStatMap[field] = value
}
}
}
@@ -502,22 +487,11 @@ func gatherBlockIOMetrics(
if field == "container_id" {
continue
}

var uintV uint64
switch v := value.(type) {
case uint64:
uintV = v
case int64:
uintV = uint64(v)
default:
continue
}

_, ok := totalStatMap[field]
if ok {
totalStatMap[field] = totalStatMap[field].(uint64) + uintV
totalStatMap[field] = totalStatMap[field].(uint64) + value.(uint64)
} else {
totalStatMap[field] = uintV
totalStatMap[field] = value
}
}
}

@@ -340,7 +340,7 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont
container2 := types.Container{
ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
Names: []string{"/etcd2"},
Image: "quay.io:4443/coreos/etcd:v2.2.2",
Image: "quay.io/coreos/etcd:v2.2.2",
Command: "/etcd -name etcd2 -advertise-client-urls http://localhost:2379 -listen-client-urls http://0.0.0.0:2379",
Created: 1455941933,
Status: "Up 4 hours",
@@ -429,7 +429,7 @@ func TestDockerGatherInfo(t *testing.T) {
},
map[string]string{
"container_name": "etcd2",
"container_image": "quay.io:4443/coreos/etcd",
"container_image": "quay.io/coreos/etcd",
"cpu": "cpu3",
"container_version": "v2.2.2",
"engine_host": "absol",
@@ -477,7 +477,7 @@ func TestDockerGatherInfo(t *testing.T) {
map[string]string{
"engine_host": "absol",
"container_name": "etcd2",
"container_image": "quay.io:4443/coreos/etcd",
"container_image": "quay.io/coreos/etcd",
"container_version": "v2.2.2",
},
)

@@ -2,8 +2,7 @@

The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain
[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
and optionally [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) stats.

### Configuration:

@@ -15,18 +14,13 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"

## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true

## Set cluster_health to true when you want to also obtain cluster health stats
## set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false

## Set cluster_stats to true when you want to obtain cluster stats from the
## Master node.
cluster_stats = false

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"

@@ -13,18 +12,13 @@ import (
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"io/ioutil"
"strings"
)

// mask for masking username/password from error messages
var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`)

// Nodestats are always generated, so simply define a constant for these endpoints
const statsPath = "/_nodes/stats"
const statsPathLocal = "/_nodes/_local/stats"
const healthPath = "/_cluster/health"

type nodeStat struct {
type node struct {
Host string `json:"host"`
Name string `json:"name"`
Attributes map[string]string `json:"attributes"`
@@ -64,20 +58,6 @@ type indexHealth struct {
UnassignedShards int `json:"unassigned_shards"`
}

type clusterStats struct {
NodeName string `json:"node_name"`
ClusterName string `json:"cluster_name"`
Status string `json:"status"`
Indices interface{} `json:"indices"`
Nodes interface{} `json:"nodes"`
}

type catMaster struct {
NodeID string `json:"id"`
NodeIP string `json:"ip"`
NodeName string `json:"node"`
}

const sampleConfig = `
## specify a list of one or more Elasticsearch servers
# you can add username and password to your url to use basic authentication:
@@ -87,18 +67,13 @@ const sampleConfig = `
## Timeout for HTTP requests to the elastic search server(s)
http_timeout = "5s"

## When local is true (the default), the node will read only its own stats.
## Set local to false when you want to read the node stats from all nodes
## of the cluster.
## set local to false when you want to read the indices stats from all nodes
## within the cluster
local = true

## Set cluster_health to true when you want to also obtain cluster health stats
## set cluster_health to true when you want to also obtain cluster level stats
cluster_health = false

## Set cluster_stats to true when you want to also obtain cluster stats from the
## Master node.
cluster_stats = false

## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
@@ -110,18 +85,15 @@ const sampleConfig = `
// Elasticsearch is a plugin to read stats from one or many Elasticsearch
// servers.
type Elasticsearch struct {
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
ClusterStats bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
InsecureSkipVerify bool // Use SSL but skip chain & host verification
client *http.Client
catMasterResponseTokens []string
isMaster bool
Local bool
Servers []string
HttpTimeout internal.Duration
ClusterHealth bool
SSLCA string `toml:"ssl_ca"` // Path to CA file
SSLCert string `toml:"ssl_cert"` // Path to host cert file
SSLKey string `toml:"ssl_key"` // Path to cert key file
InsecureSkipVerify bool // Use SSL but skip chain & host verification
client *http.Client
}

// NewElasticsearch return a new instance of Elasticsearch
@@ -153,7 +125,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
e.client = client
}

errChan := errchan.New(len(e.Servers) * 3)
errChan := errchan.New(len(e.Servers))
var wg sync.WaitGroup
wg.Add(len(e.Servers))

@@ -166,36 +138,12 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
} else {
url = s + statsPath
}
e.isMaster = false

if e.ClusterStats {
// get cat/master information here so NodeStats can determine
// whether this node is the Master
e.setCatMaster(s + "/_cat/master")
}

// Always gather node states
if err := e.gatherNodeStats(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}

if e.ClusterHealth {
url = s + "/_cluster/health?level=indices"
if err := e.gatherClusterHealth(url, acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
}

if e.ClusterStats && e.isMaster {
if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil {
err = fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))
errChan.C <- err
return
}
e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
}
}(serv, acc)
}
@@ -223,13 +171,12 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) {

func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error {
nodeStats := &struct {
ClusterName string `json:"cluster_name"`
Nodes map[string]*nodeStat `json:"nodes"`
ClusterName string `json:"cluster_name"`
Nodes map[string]*node `json:"nodes"`
}{}
if err := e.gatherJsonData(url, nodeStats); err != nil {
if err := e.gatherData(url, nodeStats); err != nil {
return err
}

for id, n := range nodeStats.Nodes {
tags := map[string]string{
"node_id": id,
@@ -238,11 +185,6 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
"cluster_name": nodeStats.ClusterName,
}

if e.ClusterStats {
// check for master
e.isMaster = (id == e.catMasterResponseTokens[0])
}

for k, v := range n.Attributes {
tags["node_attribute_"+k] = v
}
@@ -262,7 +204,6 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
now := time.Now()
for p, s := range stats {
f := jsonparser.JSONFlattener{}
// parse Json, ignoring strings and bools
err := f.FlattenJSON("", s)
if err != nil {
return err
@@ -273,31 +214,31 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er
return nil
}

func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error {
healthStats := &clusterHealth{}
if err := e.gatherJsonData(url, healthStats); err != nil {
func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
clusterStats := &clusterHealth{}
if err := e.gatherData(url, clusterStats); err != nil {
return err
}
measurementTime := time.Now()
clusterFields := map[string]interface{}{
"status": healthStats.Status,
"timed_out": healthStats.TimedOut,
"number_of_nodes": healthStats.NumberOfNodes,
"number_of_data_nodes": healthStats.NumberOfDataNodes,
"active_primary_shards": healthStats.ActivePrimaryShards,
"active_shards": healthStats.ActiveShards,
"relocating_shards": healthStats.RelocatingShards,
"initializing_shards": healthStats.InitializingShards,
"unassigned_shards": healthStats.UnassignedShards,
"status": clusterStats.Status,
"timed_out": clusterStats.TimedOut,
"number_of_nodes": clusterStats.NumberOfNodes,
"number_of_data_nodes": clusterStats.NumberOfDataNodes,
"active_primary_shards": clusterStats.ActivePrimaryShards,
"active_shards": clusterStats.ActiveShards,
"relocating_shards": clusterStats.RelocatingShards,
"initializing_shards": clusterStats.InitializingShards,
"unassigned_shards": clusterStats.UnassignedShards,
}
acc.AddFields(
"elasticsearch_cluster_health",
clusterFields,
map[string]string{"name": healthStats.ClusterName},
map[string]string{"name": clusterStats.ClusterName},
measurementTime,
)

for name, health := range healthStats.Indices {
for name, health := range clusterStats.Indices {
indexFields := map[string]interface{}{
"status": health.Status,
"number_of_shards": health.NumberOfShards,
@@ -318,60 +259,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator
return nil
}

func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error {
clusterStats := &clusterStats{}
if err := e.gatherJsonData(url, clusterStats); err != nil {
return err
}
now := time.Now()
tags := map[string]string{
"node_name": clusterStats.NodeName,
"cluster_name": clusterStats.ClusterName,
"status": clusterStats.Status,
}

stats := map[string]interface{}{
"nodes": clusterStats.Nodes,
"indices": clusterStats.Indices,
}

for p, s := range stats {
f := jsonparser.JSONFlattener{}
// parse json, including bools and strings
err := f.FullFlattenJSON("", s, true, true)
if err != nil {
return err
}
acc.AddFields("elasticsearch_clusterstats_"+p, f.Fields, tags, now)
}

return nil
}

func (e *Elasticsearch) setCatMaster(url string) error {
r, err := e.client.Get(url)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
// NOTE: we are not going to read/discard r.Body under the assumption we'd prefer
// to let the underlying transport close the connection and re-establish a new one for
// future calls.
return fmt.Errorf("status-code %d, expected %d", r.StatusCode, http.StatusOK)
}
response, err := ioutil.ReadAll(r.Body)

if err != nil {
return err
}

e.catMasterResponseTokens = strings.Split(string(response), " ")

return nil
}

func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
func (e *Elasticsearch) gatherData(url string, v interface{}) error {
r, err := e.client.Get(url)
if err != nil {
return err
@@ -384,11 +272,9 @@ func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error {
return fmt.Errorf("elasticsearch: API responded with status-code %d, expected %d",
r.StatusCode, http.StatusOK)
}

if err = json.NewDecoder(r.Body).Decode(v); err != nil {
return err
}

return nil
}

@@ -8,8 +8,6 @@ import (

"github.com/influxdata/telegraf/testutil"

"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@@ -39,13 +37,16 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) {
func (t *transportMock) CancelRequest(_ *http.Request) {
}

func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) {
if es.isMaster != expected {
msg := fmt.Sprintf("IsMaster set incorrectly")
assert.Fail(t, msg)
func TestElasticsearch(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, statsResponse)

var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
}
}
func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {

tags := map[string]string{
"cluster_name": "es-testcluster",
"node_attribute_master": "true",
@@ -54,55 +55,25 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) {
"node_host": "test",
}

acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_indices", indicesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_os", osExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_process", processExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", jvmExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", threadPoolExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_fs", fsExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_transport", transportExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_http", httpExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", breakersExpected, tags)
}

func TestGather(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)

var acc testutil.Accumulator
if err := es.Gather(&acc); err != nil {
t.Fatal(err)
}

checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}

func TestGatherNodeStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)

var acc testutil.Accumulator
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}

checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)
}

func TestGatherClusterHealth(t *testing.T) {
func TestGatherClusterStats(t *testing.T) {
es := newElasticsearchWithClient()
es.Servers = []string{"http://example.com:9200"}
es.ClusterHealth = true
es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse)
es.client.Transport = newTransportMock(http.StatusOK, clusterResponse)

var acc testutil.Accumulator
require.NoError(t, es.gatherClusterHealth("junk", &acc))

checkIsMaster(es, false, t)
require.NoError(t, es.Gather(&acc))

acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health",
clusterHealthExpected,
@@ -117,77 +88,6 @@ func TestGatherClusterHealth(t *testing.T) {
map[string]string{"index": "v2"})
}

func TestGatherClusterStatsMaster(t *testing.T) {
// This needs multiple steps to replicate the multiple calls internally.
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}

// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult)
require.NoError(t, es.setCatMaster("junk"))

IsMasterResultTokens := strings.Split(string(IsMasterResult), " ")
if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}

// now get node status, which determines whether we're master
var acc testutil.Accumulator
es.Local = true
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}

checkIsMaster(es, true, t)
checkNodeStatsResult(t, &acc)

// now test the clusterstats method
es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse)
require.NoError(t, es.gatherClusterStats("junk", &acc))

tags := map[string]string{
"cluster_name": "es-testcluster",
"node_name": "test.host.com",
"status": "red",
}

acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_nodes", clusterstatsNodesExpected, tags)
acc.AssertContainsTaggedFields(t, "elasticsearch_clusterstats_indices", clusterstatsIndicesExpected, tags)
}

func TestGatherClusterStatsNonMaster(t *testing.T) {
// This needs multiple steps to replicate the multiple calls internally.
es := newElasticsearchWithClient()
es.ClusterStats = true
es.Servers = []string{"http://example.com:9200"}

// first get catMaster
es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult)
require.NoError(t, es.setCatMaster("junk"))

IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ")
if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] {
msg := fmt.Sprintf("catmaster is incorrect")
assert.Fail(t, msg)
}

// now get node status, which determines whether we're master
var acc testutil.Accumulator
es.Local = true
es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse)
if err := es.gatherNodeStats("junk", &acc); err != nil {
t.Fatal(err)
}

// ensure flag is clear so Cluster Stats would not be done
checkIsMaster(es, false, t)
checkNodeStatsResult(t, &acc)

}

func newElasticsearchWithClient() *Elasticsearch {
es := NewElasticsearch()
es.client = &http.Client{}

@@ -1,6 +1,6 @@
package elasticsearch

const clusterHealthResponse = `
const clusterResponse = `
{
"cluster_name": "elasticsearch_telegraf",
"status": "green",
@@ -71,7 +71,7 @@ var v2IndexExpected = map[string]interface{}{
"unassigned_shards": 20,
}

const nodeStatsResponse = `
const statsResponse = `
{
"cluster_name": "es-testcluster",
"nodes": {
@@ -489,7 +489,7 @@ const nodeStatsResponse = `
}
`

var nodestatsIndicesExpected = map[string]interface{}{
var indicesExpected = map[string]interface{}{
"id_cache_memory_size_in_bytes": float64(0),
"completion_size_in_bytes": float64(0),
"suggest_total": float64(0),
@@ -561,7 +561,7 @@ var nodestatsIndicesExpected = map[string]interface{}{
"segments_fixed_bit_set_memory_in_bytes": float64(0),
}

var nodestatsOsExpected = map[string]interface{}{
var osExpected = map[string]interface{}{
"load_average_0": float64(0.01),
"load_average_1": float64(0.04),
"load_average_2": float64(0.05),
@@ -576,7 +576,7 @@ var nodestatsOsExpected = map[string]interface{}{
"mem_used_in_bytes": float64(1621868544),
}

var nodestatsProcessExpected = map[string]interface{}{
|
||||
var processExpected = map[string]interface{}{
|
||||
"mem_total_virtual_in_bytes": float64(4747890688),
|
||||
"timestamp": float64(1436460392945),
|
||||
"open_file_descriptors": float64(160),
|
||||
@@ -586,7 +586,7 @@ var nodestatsProcessExpected = map[string]interface{}{
|
||||
"cpu_user_in_millis": float64(13610),
|
||||
}
|
||||
|
||||
var nodestatsJvmExpected = map[string]interface{}{
|
||||
var jvmExpected = map[string]interface{}{
|
||||
"timestamp": float64(1436460392945),
|
||||
"uptime_in_millis": float64(202245),
|
||||
"mem_non_heap_used_in_bytes": float64(39634576),
|
||||
@@ -621,7 +621,7 @@ var nodestatsJvmExpected = map[string]interface{}{
|
||||
"buffer_pools_mapped_total_capacity_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
var nodestatsThreadPoolExpected = map[string]interface{}{
|
||||
var threadPoolExpected = map[string]interface{}{
|
||||
"merge_threads": float64(6),
|
||||
"merge_queue": float64(4),
|
||||
"merge_active": float64(5),
|
||||
@@ -726,7 +726,7 @@ var nodestatsThreadPoolExpected = map[string]interface{}{
|
||||
"flush_completed": float64(3),
|
||||
}
|
||||
|
||||
var nodestatsFsExpected = map[string]interface{}{
|
||||
var fsExpected = map[string]interface{}{
|
||||
"data_0_total_in_bytes": float64(19507089408),
|
||||
"data_0_free_in_bytes": float64(16909316096),
|
||||
"data_0_available_in_bytes": float64(15894814720),
|
||||
@@ -736,7 +736,7 @@ var nodestatsFsExpected = map[string]interface{}{
|
||||
"total_total_in_bytes": float64(19507089408),
|
||||
}
|
||||
|
||||
var nodestatsTransportExpected = map[string]interface{}{
|
||||
var transportExpected = map[string]interface{}{
|
||||
"server_open": float64(13),
|
||||
"rx_count": float64(6),
|
||||
"rx_size_in_bytes": float64(1380),
|
||||
@@ -744,12 +744,12 @@ var nodestatsTransportExpected = map[string]interface{}{
|
||||
"tx_size_in_bytes": float64(1380),
|
||||
}
|
||||
|
||||
var nodestatsHttpExpected = map[string]interface{}{
|
||||
var httpExpected = map[string]interface{}{
|
||||
"current_open": float64(3),
|
||||
"total_opened": float64(3),
|
||||
}
|
||||
|
||||
var nodestatsBreakersExpected = map[string]interface{}{
|
||||
var breakersExpected = map[string]interface{}{
|
||||
"fielddata_estimated_size_in_bytes": float64(0),
|
||||
"fielddata_overhead": float64(1.03),
|
||||
"fielddata_tripped": float64(0),
|
||||
@@ -763,273 +763,3 @@ var nodestatsBreakersExpected = map[string]interface{}{
|
||||
"parent_limit_size_in_bytes": float64(727213670),
|
||||
"parent_estimated_size_in_bytes": float64(0),
|
||||
}
|
||||
|
||||
const clusterStatsResponse = `
|
||||
{
|
||||
"host":"ip-10-0-1-214",
|
||||
"log_type":"metrics",
|
||||
"timestamp":1475767451229,
|
||||
"log_level":"INFO",
|
||||
"node_name":"test.host.com",
|
||||
"cluster_name":"es-testcluster",
|
||||
"status":"red",
|
||||
"indices":{
|
||||
"count":1,
|
||||
"shards":{
|
||||
"total":4,
|
||||
"primaries":4,
|
||||
"replication":0.0,
|
||||
"index":{
|
||||
"shards":{
|
||||
"min":4,
|
||||
"max":4,
|
||||
"avg":4.0
|
||||
},
|
||||
"primaries":{
|
||||
"min":4,
|
||||
"max":4,
|
||||
"avg":4.0
|
||||
},
|
||||
"replication":{
|
||||
"min":0.0,
|
||||
"max":0.0,
|
||||
"avg":0.0
|
||||
}
|
||||
}
|
||||
},
|
||||
"docs":{
|
||||
"count":4,
|
||||
"deleted":0
|
||||
},
|
||||
"store":{
|
||||
"size_in_bytes":17084,
|
||||
"throttle_time_in_millis":0
|
||||
},
|
||||
"fielddata":{
|
||||
"memory_size_in_bytes":0,
|
||||
"evictions":0
|
||||
},
|
||||
"query_cache":{
|
||||
"memory_size_in_bytes":0,
|
||||
"total_count":0,
|
||||
"hit_count":0,
|
||||
"miss_count":0,
|
||||
"cache_size":0,
|
||||
"cache_count":0,
|
||||
"evictions":0
|
||||
},
|
||||
"completion":{
|
||||
"size_in_bytes":0
|
||||
},
|
||||
"segments":{
|
||||
"count":4,
|
||||
"memory_in_bytes":11828,
|
||||
"terms_memory_in_bytes":8932,
|
||||
"stored_fields_memory_in_bytes":1248,
|
||||
"term_vectors_memory_in_bytes":0,
|
||||
"norms_memory_in_bytes":1280,
|
||||
"doc_values_memory_in_bytes":368,
|
||||
"index_writer_memory_in_bytes":0,
|
||||
"index_writer_max_memory_in_bytes":2048000,
|
||||
"version_map_memory_in_bytes":0,
|
||||
"fixed_bit_set_memory_in_bytes":0
|
||||
},
|
||||
"percolate":{
|
||||
"total":0,
|
||||
"time_in_millis":0,
|
||||
"current":0,
|
||||
"memory_size_in_bytes":-1,
|
||||
"memory_size":"-1b",
|
||||
"queries":0
|
||||
}
|
||||
},
|
||||
"nodes":{
|
||||
"count":{
|
||||
"total":1,
|
||||
"master_only":0,
|
||||
"data_only":0,
|
||||
"master_data":1,
|
||||
"client":0
|
||||
},
|
||||
"versions":[
|
||||
{
|
||||
"version": "2.3.3"
|
||||
}
|
||||
],
|
||||
"os":{
|
||||
"available_processors":1,
|
||||
"allocated_processors":1,
|
||||
"mem":{
|
||||
"total_in_bytes":593301504
|
||||
},
|
||||
"names":[
|
||||
{
|
||||
"name":"Linux",
|
||||
"count":1
|
||||
}
|
||||
]
|
||||
},
|
||||
"process":{
|
||||
"cpu":{
|
||||
"percent":0
|
||||
},
|
||||
"open_file_descriptors":{
|
||||
"min":145,
|
||||
"max":145,
|
||||
"avg":145
|
||||
}
|
||||
},
|
||||
"jvm":{
|
||||
"max_uptime_in_millis":11580527,
|
||||
"versions":[
|
||||
{
|
||||
"version":"1.8.0_101",
|
||||
"vm_name":"OpenJDK 64-Bit Server VM",
|
||||
"vm_version":"25.101-b13",
|
||||
"vm_vendor":"Oracle Corporation",
|
||||
"count":1
|
||||
}
|
||||
],
|
||||
"mem":{
|
||||
"heap_used_in_bytes":70550288,
|
||||
"heap_max_in_bytes":1065025536
|
||||
},
|
||||
"threads":30
|
||||
},
|
||||
"fs":{
|
||||
"total_in_bytes":8318783488,
|
||||
"free_in_bytes":6447439872,
|
||||
"available_in_bytes":6344785920
|
||||
},
|
||||
"plugins":[
|
||||
{
|
||||
"name":"cloud-aws",
|
||||
"version":"2.3.3",
|
||||
"description":"The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
|
||||
"jvm":true,
|
||||
"classname":"org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
|
||||
"isolated":true,
|
||||
"site":false
|
||||
},
|
||||
{
|
||||
"name":"kopf",
|
||||
"version":"2.0.1",
|
||||
"description":"kopf - simple web administration tool for Elasticsearch",
|
||||
"url":"/_plugin/kopf/",
|
||||
"jvm":false,
|
||||
"site":true
|
||||
},
|
||||
{
|
||||
"name":"tr-metrics",
|
||||
"version":"7bd5b4b",
|
||||
"description":"Logs cluster and node stats for performance monitoring.",
|
||||
"jvm":true,
|
||||
"classname":"com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
|
||||
"isolated":true,
|
||||
"site":false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var clusterstatsIndicesExpected = map[string]interface{}{
|
||||
"completion_size_in_bytes": float64(0),
|
||||
"count": float64(1),
|
||||
"docs_count": float64(4),
|
||||
"docs_deleted": float64(0),
|
||||
"fielddata_evictions": float64(0),
|
||||
"fielddata_memory_size_in_bytes": float64(0),
|
||||
"percolate_current": float64(0),
|
||||
"percolate_memory_size_in_bytes": float64(-1),
|
||||
"percolate_queries": float64(0),
|
||||
"percolate_time_in_millis": float64(0),
|
||||
"percolate_total": float64(0),
|
||||
"percolate_memory_size": "-1b",
|
||||
"query_cache_cache_count": float64(0),
|
||||
"query_cache_cache_size": float64(0),
|
||||
"query_cache_evictions": float64(0),
|
||||
"query_cache_hit_count": float64(0),
|
||||
"query_cache_memory_size_in_bytes": float64(0),
|
||||
"query_cache_miss_count": float64(0),
|
||||
"query_cache_total_count": float64(0),
|
||||
"segments_count": float64(4),
|
||||
"segments_doc_values_memory_in_bytes": float64(368),
|
||||
"segments_fixed_bit_set_memory_in_bytes": float64(0),
|
||||
"segments_index_writer_max_memory_in_bytes": float64(2.048e+06),
|
||||
"segments_index_writer_memory_in_bytes": float64(0),
|
||||
"segments_memory_in_bytes": float64(11828),
|
||||
"segments_norms_memory_in_bytes": float64(1280),
|
||||
"segments_stored_fields_memory_in_bytes": float64(1248),
|
||||
"segments_term_vectors_memory_in_bytes": float64(0),
|
||||
"segments_terms_memory_in_bytes": float64(8932),
|
||||
"segments_version_map_memory_in_bytes": float64(0),
|
||||
"shards_index_primaries_avg": float64(4),
|
||||
"shards_index_primaries_max": float64(4),
|
||||
"shards_index_primaries_min": float64(4),
|
||||
"shards_index_replication_avg": float64(0),
|
||||
"shards_index_replication_max": float64(0),
|
||||
"shards_index_replication_min": float64(0),
|
||||
"shards_index_shards_avg": float64(4),
|
||||
"shards_index_shards_max": float64(4),
|
||||
"shards_index_shards_min": float64(4),
|
||||
"shards_primaries": float64(4),
|
||||
"shards_replication": float64(0),
|
||||
"shards_total": float64(4),
|
||||
"store_size_in_bytes": float64(17084),
|
||||
"store_throttle_time_in_millis": float64(0),
|
||||
}
|
||||
|
||||
var clusterstatsNodesExpected = map[string]interface{}{
|
||||
"count_client": float64(0),
|
||||
"count_data_only": float64(0),
|
||||
"count_master_data": float64(1),
|
||||
"count_master_only": float64(0),
|
||||
"count_total": float64(1),
|
||||
"fs_available_in_bytes": float64(6.34478592e+09),
|
||||
"fs_free_in_bytes": float64(6.447439872e+09),
|
||||
"fs_total_in_bytes": float64(8.318783488e+09),
|
||||
"jvm_max_uptime_in_millis": float64(1.1580527e+07),
|
||||
"jvm_mem_heap_max_in_bytes": float64(1.065025536e+09),
|
||||
"jvm_mem_heap_used_in_bytes": float64(7.0550288e+07),
|
||||
"jvm_threads": float64(30),
|
||||
"jvm_versions_0_count": float64(1),
|
||||
"jvm_versions_0_version": "1.8.0_101",
|
||||
"jvm_versions_0_vm_name": "OpenJDK 64-Bit Server VM",
|
||||
"jvm_versions_0_vm_vendor": "Oracle Corporation",
|
||||
"jvm_versions_0_vm_version": "25.101-b13",
|
||||
"os_allocated_processors": float64(1),
|
||||
"os_available_processors": float64(1),
|
||||
"os_mem_total_in_bytes": float64(5.93301504e+08),
|
||||
"os_names_0_count": float64(1),
|
||||
"os_names_0_name": "Linux",
|
||||
"process_cpu_percent": float64(0),
|
||||
"process_open_file_descriptors_avg": float64(145),
|
||||
"process_open_file_descriptors_max": float64(145),
|
||||
"process_open_file_descriptors_min": float64(145),
|
||||
"versions_0_version": "2.3.3",
|
||||
"plugins_0_classname": "org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin",
|
||||
"plugins_0_description": "The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.",
|
||||
"plugins_0_isolated": true,
|
||||
"plugins_0_jvm": true,
|
||||
"plugins_0_name": "cloud-aws",
|
||||
"plugins_0_site": false,
|
||||
"plugins_0_version": "2.3.3",
|
||||
"plugins_1_description": "kopf - simple web administration tool for Elasticsearch",
|
||||
"plugins_1_jvm": false,
|
||||
"plugins_1_name": "kopf",
|
||||
"plugins_1_site": true,
|
||||
"plugins_1_url": "/_plugin/kopf/",
|
||||
"plugins_1_version": "2.0.1",
|
||||
"plugins_2_classname": "com.trgr.elasticsearch.plugin.metrics.MetricsPlugin",
|
||||
"plugins_2_description": "Logs cluster and node stats for performance monitoring.",
|
||||
"plugins_2_isolated": true,
|
||||
"plugins_2_jvm": true,
|
||||
"plugins_2_name": "tr-metrics",
|
||||
"plugins_2_site": false,
|
||||
"plugins_2_version": "7bd5b4b",
|
||||
}
|
||||
|
||||
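// Note (added for readability, not part of this changeset): _cat/master returns a single
// line of the form "<node id> <host> <ip> <node name>"; the tests below split it on spaces
// and compare only the first token, the master's node id.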
const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com "
|
||||
|
||||
const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com "
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
@@ -79,14 +78,8 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error {
|
||||
"file": fileName,
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"exists": int64(1),
|
||||
}
|
||||
|
||||
if fileInfo == nil {
|
||||
log.Printf("E! Unable to get info for file [%s], possible permissions issue",
|
||||
fileName)
|
||||
} else {
|
||||
fields["size_bytes"] = fileInfo.Size()
|
||||
"exists": int64(1),
|
||||
"size_bytes": fileInfo.Size(),
|
||||
}
|
||||
|
||||
if f.Md5 {
|
||||
|
||||
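The filestat hunks above reference the plugin's `Md5` option together with the `crypto/md5` and `io` imports, but the hashing code itself is not shown in this diff. As a hedged, standard-library-only sketch (the file path is hypothetical and this is not the plugin's exact implementation), hashing a file's contents typically looks like this:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"os"
)

// fileMD5 streams the file through an md5 hash and returns the hex digest.
func fileMD5(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	sum, err := fileMD5("/etc/hostname") // hypothetical path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(sum)
}
```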
@@ -12,8 +12,6 @@
|
||||
|
||||
Server addresses need to explicitly start with 'http' if you wish to use the HAProxy status page. Otherwise, the address will be assumed to be a UNIX socket and any protocol prefix (if present) will be discarded.
|
||||
For basic authentication you need to add username and password in the URL: `http://user:password@1.2.3.4/haproxy?stats`.
|
||||
|
||||
The following examples will all resolve to the same socket:
```
|
||||
socket:/var/run/haproxy.sock
|
||||
|
||||
@@ -263,11 +263,6 @@ func importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error {
|
||||
if err == nil {
|
||||
fields["smax"] = ival
|
||||
}
|
||||
case HF_SLIM:
|
||||
ival, err := strconv.ParseUint(v, 10, 64)
|
||||
if err == nil {
|
||||
fields["slim"] = ival
|
||||
}
|
||||
case HF_STOT:
|
||||
ival, err := strconv.ParseUint(v, 10, 64)
|
||||
if err == nil {
|
||||
|
||||
@@ -198,7 +198,6 @@ func HaproxyGetFieldValues() map[string]interface{} {
|
||||
"rtime": uint64(312),
|
||||
"scur": uint64(1),
|
||||
"smax": uint64(32),
|
||||
"slim": uint64(32),
|
||||
"srv_abort": uint64(1),
|
||||
"stot": uint64(171014),
|
||||
"ttime": uint64(2341),
|
||||
@@ -224,6 +223,6 @@ be_static,host1,0,0,0,1,,28,7873,1209688,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,
|
||||
be_static,host2,0,0,0,1,,28,13830,1085929,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,9,,28,,2,0,,1,L4OK,,0,0,19,6,3,0,0,0,,,,0,0,,,,,338,,,0,1,1,38,
|
||||
be_static,host3,0,0,0,1,,28,17959,1259760,,0,,0,0,0,0,UP,1,1,0,0,0,70698,0,,2,18,10,,28,,2,0,,1,L4OK,,1,0,20,6,2,0,0,0,,,,0,0,,,,,92,,,0,1,1,17,
|
||||
be_static,BACKEND,0,0,0,2,200,307,160276,13322728,0,0,,0,0,0,0,UP,11,11,0,,0,70698,0,,2,18,0,,307,,1,0,,4,,,,0,205,73,29,0,0,,,,,0,0,0,0,0,0,92,,,0,1,3,381,
|
||||
be_app,host0,0,0,1,32,32,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
|
||||
be_app,host4,0,0,2,29,32,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
|
||||
be_app,host0,0,0,1,32,,171014,510913516,2193856571,,0,,0,1,1,0,UP,100,1,0,1,0,70698,0,,2,19,1,,171013,,2,3,,12,L7OK,301,10,0,119534,48051,2345,1056,0,0,,,,73,1,,,,,0,Moved Permanently,,0,2,312,2341,
|
||||
be_app,host4,0,0,2,29,,171013,499318742,2195595896,12,34,,0,2,0,0,UP,100,1,0,2,0,70698,0,,2,19,2,,171013,,2,3,,12,L7OK,301,12,0,119572,47882,2441,1088,0,0,,,,84,2,,,,,0,Moved Permanently,,0,2,316,2355,
|
||||
`
|
||||
|
||||
@@ -8,7 +8,7 @@ Hddtemp should be installed and its daemon running
|
||||
|
||||
## Configuration
|
||||
|
||||
```toml
|
||||
```
|
||||
[[inputs.hddtemp]]
|
||||
## By default, telegraf gathers temps data from all disks detected by the
|
||||
## hddtemp.
|
||||
@@ -20,24 +20,3 @@ Hddtemp should be installed and its daemon running
|
||||
# address = "127.0.0.1:7634"
|
||||
# devices = ["sda", "*"]
|
||||
```
|
||||
|
||||
## Measurements
|
||||
|
||||
- hddtemp
|
||||
- temperature
|
||||
|
||||
Tags:
|
||||
- device
|
||||
- model
|
||||
- unit
|
||||
- status
|
||||
|
||||
|
||||
|
||||
## Example output
|
||||
|
||||
```
|
||||
> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000
|
||||
> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 148165564700000000
|
||||
> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000
|
||||
```
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Disk struct {
|
||||
type disk struct {
|
||||
DeviceName string
|
||||
Model string
|
||||
Temperature int32
|
||||
@@ -16,19 +16,12 @@ type Disk struct {
|
||||
Status string
|
||||
}
|
||||
|
||||
type hddtemp struct {
|
||||
}
|
||||
|
||||
func New() *hddtemp {
|
||||
return &hddtemp{}
|
||||
}
|
||||
|
||||
func (h *hddtemp) Fetch(address string) ([]Disk, error) {
|
||||
func Fetch(address string) ([]disk, error) {
|
||||
var (
|
||||
err error
|
||||
conn net.Conn
|
||||
buffer bytes.Buffer
|
||||
disks []Disk
|
||||
disks []disk
|
||||
)
|
||||
|
||||
if conn, err = net.Dial("tcp", address); err != nil {
|
||||
@@ -55,7 +48,7 @@ func (h *hddtemp) Fetch(address string) ([]Disk, error) {
|
||||
status = temperatureField
|
||||
}
|
||||
|
||||
disks = append(disks, Disk{
|
||||
disks = append(disks, disk{
|
||||
DeviceName: device,
|
||||
Model: fields[offset+2],
|
||||
Temperature: int32(temperature),
|
||||
|
||||
@@ -10,13 +10,13 @@ func TestFetch(t *testing.T) {
|
||||
l := serve(t, []byte("|/dev/sda|foobar|36|C|"))
|
||||
defer l.Close()
|
||||
|
||||
disks, err := New().Fetch(l.Addr().String())
|
||||
disks, err := Fetch(l.Addr().String())
|
||||
|
||||
if err != nil {
|
||||
t.Error("expecting err to be nil")
|
||||
}
|
||||
|
||||
expected := []Disk{
|
||||
expected := []disk{
|
||||
{
|
||||
DeviceName: "sda",
|
||||
Model: "foobar",
|
||||
@@ -31,7 +31,7 @@ func TestFetch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFetchWrongAddress(t *testing.T) {
|
||||
_, err := New().Fetch("127.0.0.1:1")
|
||||
_, err := Fetch("127.0.0.1:1")
|
||||
|
||||
if err == nil {
|
||||
t.Error("expecting err to be non-nil")
|
||||
@@ -42,13 +42,13 @@ func TestFetchStatus(t *testing.T) {
|
||||
l := serve(t, []byte("|/dev/sda|foobar|SLP|C|"))
|
||||
defer l.Close()
|
||||
|
||||
disks, err := New().Fetch(l.Addr().String())
|
||||
disks, err := Fetch(l.Addr().String())
|
||||
|
||||
if err != nil {
|
||||
t.Error("expecting err to be nil")
|
||||
}
|
||||
|
||||
expected := []Disk{
|
||||
expected := []disk{
|
||||
{
|
||||
DeviceName: "sda",
|
||||
Model: "foobar",
|
||||
@@ -67,13 +67,13 @@ func TestFetchTwoDisks(t *testing.T) {
|
||||
l := serve(t, []byte("|/dev/hda|ST380011A|46|C||/dev/hdd|ST340016A|SLP|*|"))
|
||||
defer l.Close()
|
||||
|
||||
disks, err := New().Fetch(l.Addr().String())
|
||||
disks, err := Fetch(l.Addr().String())
|
||||
|
||||
if err != nil {
|
||||
t.Error("expecting err to be nil")
|
||||
}
|
||||
|
||||
expected := []Disk{
|
||||
expected := []disk{
|
||||
{
|
||||
DeviceName: "hda",
|
||||
Model: "ST380011A",
|
||||
|
||||
@@ -13,11 +13,6 @@ const defaultAddress = "127.0.0.1:7634"
|
||||
type HDDTemp struct {
|
||||
Address string
|
||||
Devices []string
|
||||
fetcher Fetcher
|
||||
}
|
||||
|
||||
type Fetcher interface {
|
||||
Fetch(address string) ([]gohddtemp.Disk, error)
|
||||
}
|
||||
|
||||
func (_ *HDDTemp) Description() string {
|
||||
@@ -41,10 +36,7 @@ func (_ *HDDTemp) SampleConfig() string {
|
||||
}
|
||||
|
||||
func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
|
||||
if h.fetcher == nil {
|
||||
h.fetcher = gohddtemp.New()
|
||||
}
|
||||
disks, err := h.fetcher.Fetch(h.Address)
|
||||
disks, err := gohddtemp.Fetch(h.Address)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -61,7 +53,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error {
|
||||
}
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"temperature": disk.Temperature,
|
||||
disk.DeviceName: disk.Temperature,
|
||||
}
|
||||
|
||||
acc.AddFields("hddtemp", fields, tags)
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
package hddtemp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp"
|
||||
"github.com/influxdata/telegraf/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type mockFetcher struct {
|
||||
}
|
||||
|
||||
func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) {
|
||||
return []hddtemp.Disk{
|
||||
hddtemp.Disk{
|
||||
DeviceName: "Disk1",
|
||||
Model: "Model1",
|
||||
Temperature: 13,
|
||||
Unit: "C",
|
||||
},
|
||||
hddtemp.Disk{
|
||||
DeviceName: "Disk2",
|
||||
Model: "Model2",
|
||||
Temperature: 14,
|
||||
Unit: "C",
|
||||
},
|
||||
}, nil
|
||||
|
||||
}
|
||||
func newMockFetcher() *mockFetcher {
|
||||
return &mockFetcher{}
|
||||
}
|
||||
|
||||
func TestFetch(t *testing.T) {
|
||||
hddtemp := &HDDTemp{
|
||||
fetcher: newMockFetcher(),
|
||||
Devices: []string{"*"},
|
||||
}
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
err := hddtemp.Gather(acc)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, acc.NFields(), 2)
|
||||
|
||||
var tests = []struct {
|
||||
fields map[string]interface{}
|
||||
tags map[string]string
|
||||
}{
|
||||
{
|
||||
map[string]interface{}{
|
||||
"temperature": int32(13),
|
||||
},
|
||||
map[string]string{
|
||||
"device": "Disk1",
|
||||
"model": "Model1",
|
||||
"unit": "C",
|
||||
"status": "",
|
||||
},
|
||||
},
|
||||
{
|
||||
map[string]interface{}{
|
||||
"temperature": int32(14),
|
||||
},
|
||||
map[string]string{
|
||||
"device": "Disk2",
|
||||
"model": "Model2",
|
||||
"unit": "C",
|
||||
"status": "",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -300,9 +300,6 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
func (h *HTTPListener) parse(b []byte, t time.Time) error {
|
||||
if !bytes.HasSuffix(b, []byte("\n")) {
|
||||
b = append(b, '\n')
|
||||
}
|
||||
metrics, err := h.parser.ParseWithDefaultTime(b, t)
|
||||
|
||||
for _, m := range metrics {
|
||||
|
||||
@@ -16,8 +16,6 @@ import (
|
||||
const (
|
||||
testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n"
|
||||
|
||||
testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257"
|
||||
|
||||
testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257
|
||||
cpu_load_short,host=server03 value=12.0 1422568543702900257
|
||||
cpu_load_short,host=server04 value=12.0 1422568543702900257
|
||||
@@ -83,28 +81,6 @@ func TestWriteHTTP(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
// http listener should add a newline at the end of the buffer if it's not there
|
||||
func TestWriteHTTPNoNewline(t *testing.T) {
|
||||
listener := newTestHTTPListener()
|
||||
|
||||
acc := &testutil.Accumulator{}
|
||||
require.NoError(t, listener.Start(acc))
|
||||
defer listener.Stop()
|
||||
|
||||
time.Sleep(time.Millisecond * 25)
|
||||
|
||||
// post single message to listener
|
||||
resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline)))
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, 204, resp.StatusCode)
|
||||
|
||||
time.Sleep(time.Millisecond * 15)
|
||||
acc.AssertContainsTaggedFields(t, "cpu_load_short",
|
||||
map[string]interface{}{"value": float64(12)},
|
||||
map[string]string{"host": "server01"},
|
||||
)
|
||||
}
|
||||
|
||||
func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) {
|
||||
listener := &HTTPListener{
|
||||
ServiceAddress: ":8296",
|
||||
|
||||
@@ -94,33 +94,32 @@ type point struct {
|
||||
}
|
||||
|
||||
type memstats struct {
|
||||
Alloc int64 `json:"Alloc"`
|
||||
TotalAlloc int64 `json:"TotalAlloc"`
|
||||
Sys int64 `json:"Sys"`
|
||||
Lookups int64 `json:"Lookups"`
|
||||
Mallocs int64 `json:"Mallocs"`
|
||||
Frees int64 `json:"Frees"`
|
||||
HeapAlloc int64 `json:"HeapAlloc"`
|
||||
HeapSys int64 `json:"HeapSys"`
|
||||
HeapIdle int64 `json:"HeapIdle"`
|
||||
HeapInuse int64 `json:"HeapInuse"`
|
||||
HeapReleased int64 `json:"HeapReleased"`
|
||||
HeapObjects int64 `json:"HeapObjects"`
|
||||
StackInuse int64 `json:"StackInuse"`
|
||||
StackSys int64 `json:"StackSys"`
|
||||
MSpanInuse int64 `json:"MSpanInuse"`
|
||||
MSpanSys int64 `json:"MSpanSys"`
|
||||
MCacheInuse int64 `json:"MCacheInuse"`
|
||||
MCacheSys int64 `json:"MCacheSys"`
|
||||
BuckHashSys int64 `json:"BuckHashSys"`
|
||||
GCSys int64 `json:"GCSys"`
|
||||
OtherSys int64 `json:"OtherSys"`
|
||||
NextGC int64 `json:"NextGC"`
|
||||
LastGC int64 `json:"LastGC"`
|
||||
PauseTotalNs int64 `json:"PauseTotalNs"`
|
||||
PauseNs [256]int64 `json:"PauseNs"`
|
||||
NumGC int64 `json:"NumGC"`
|
||||
GCCPUFraction float64 `json:"GCCPUFraction"`
|
||||
Alloc int64 `json:"Alloc"`
|
||||
TotalAlloc int64 `json:"TotalAlloc"`
|
||||
Sys int64 `json:"Sys"`
|
||||
Lookups int64 `json:"Lookups"`
|
||||
Mallocs int64 `json:"Mallocs"`
|
||||
Frees int64 `json:"Frees"`
|
||||
HeapAlloc int64 `json:"HeapAlloc"`
|
||||
HeapSys int64 `json:"HeapSys"`
|
||||
HeapIdle int64 `json:"HeapIdle"`
|
||||
HeapInuse int64 `json:"HeapInuse"`
|
||||
HeapReleased int64 `json:"HeapReleased"`
|
||||
HeapObjects int64 `json:"HeapObjects"`
|
||||
StackInuse int64 `json:"StackInuse"`
|
||||
StackSys int64 `json:"StackSys"`
|
||||
MSpanInuse int64 `json:"MSpanInuse"`
|
||||
MSpanSys int64 `json:"MSpanSys"`
|
||||
MCacheInuse int64 `json:"MCacheInuse"`
|
||||
MCacheSys int64 `json:"MCacheSys"`
|
||||
BuckHashSys int64 `json:"BuckHashSys"`
|
||||
GCSys int64 `json:"GCSys"`
|
||||
OtherSys int64 `json:"OtherSys"`
|
||||
NextGC int64 `json:"NextGC"`
|
||||
LastGC int64 `json:"LastGC"`
|
||||
PauseTotalNs int64 `json:"PauseTotalNs"`
|
||||
NumGC int64 `json:"NumGC"`
|
||||
GCCPUFraction float64 `json:"GCCPUFraction"`
|
||||
}
|
||||
|
||||
// Gathers data from a particular URL
|
||||
@@ -203,7 +202,6 @@ func (i *InfluxDB) gatherURL(
|
||||
"next_gc": m.NextGC,
|
||||
"last_gc": m.LastGC,
|
||||
"pause_total_ns": m.PauseTotalNs,
|
||||
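// PauseNs is a circular buffer of the most recent GC stop-the-world pause times;
// index (NumGC+255)%256 holds the latest pause.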
"pause_ns": m.PauseNs[(m.NumGC+255)%256],
|
||||
"num_gc": m.NumGC,
|
||||
"gcc_pu_fraction": m.GCCPUFraction,
|
||||
},
|
||||
|
||||
@@ -86,7 +86,6 @@ func TestInfluxDB(t *testing.T) {
|
||||
"frees": int64(381008),
|
||||
"heap_idle": int64(15802368),
|
||||
"pause_total_ns": int64(5132914),
|
||||
"pause_ns": int64(127053),
|
||||
"lookups": int64(77),
|
||||
"heap_sys": int64(33849344),
|
||||
"mcache_sys": int64(16384),
|
||||
|
||||
@@ -6,8 +6,7 @@
|
||||
# Read JMX metrics through Jolokia
|
||||
[[inputs.jolokia]]
|
||||
## This is the context root used to compose the jolokia url
|
||||
## NOTE that Jolokia requires a trailing slash at the end of the context root
|
||||
context = "/jolokia/"
|
||||
context = "/jolokia"
|
||||
|
||||
## This specifies the mode used
|
||||
# mode = "proxy"
|
||||
@@ -18,16 +17,7 @@
|
||||
# [inputs.jolokia.proxy]
|
||||
# host = "127.0.0.1"
|
||||
# port = "8080"
|
||||
|
||||
## Optional http timeouts
|
||||
##
|
||||
## response_header_timeout, if non-zero, specifies the amount of time to wait
|
||||
## for a server's response headers after fully writing the request.
|
||||
# response_header_timeout = "3s"
|
||||
##
|
||||
## client_timeout specifies a time limit for requests made by this client.
|
||||
## Includes connection time, any redirects, and reading the response body.
|
||||
# client_timeout = "4s"
|
||||
|
||||
|
||||
## List of servers exposing jolokia read service
|
||||
[[inputs.jolokia.servers]]
|
||||
|
||||
@@ -11,14 +11,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/internal"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
)
|
||||
|
||||
// Default http timeouts
|
||||
var DefaultResponseHeaderTimeout = internal.Duration{Duration: 3 * time.Second}
|
||||
var DefaultClientTimeout = internal.Duration{Duration: 4 * time.Second}
|
||||
|
||||
type Server struct {
|
||||
Name string
|
||||
Host string
|
||||
@@ -47,23 +42,18 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error
|
||||
}
|
||||
|
||||
type Jolokia struct {
|
||||
jClient JolokiaClient
|
||||
Context string
|
||||
Mode string
|
||||
Servers []Server
|
||||
Metrics []Metric
|
||||
Proxy Server
|
||||
Delimiter string
|
||||
|
||||
ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"`
|
||||
ClientTimeout internal.Duration `toml:"client_timeout"`
|
||||
jClient JolokiaClient
|
||||
Context string
|
||||
Mode string
|
||||
Servers []Server
|
||||
Metrics []Metric
|
||||
Proxy Server
|
||||
}
|
||||
|
||||
const sampleConfig = `
|
||||
## This is the context root used to compose the jolokia url
|
||||
## NOTE that Jolokia requires a trailing slash at the end of the context root
|
||||
## NOTE that your jolokia security policy must allow for POST requests.
|
||||
context = "/jolokia/"
|
||||
context = "/jolokia"
|
||||
|
||||
## This specifies the mode used
|
||||
# mode = "proxy"
|
||||
@@ -75,22 +65,6 @@ const sampleConfig = `
|
||||
# host = "127.0.0.1"
|
||||
# port = "8080"
|
||||
|
||||
## Optional http timeouts
|
||||
##
|
||||
## response_header_timeout, if non-zero, specifies the amount of time to wait
|
||||
## for a server's response headers after fully writing the request.
|
||||
# response_header_timeout = "3s"
|
||||
##
|
||||
## client_timeout specifies a time limit for requests made by this client.
|
||||
## Includes connection time, any redirects, and reading the response body.
|
||||
# client_timeout = "4s"
|
||||
|
||||
## Attribute delimiter
|
||||
##
|
||||
## When multiple attributes are returned for a single
|
||||
## [inputs.jolokia.metrics], the field name is a concatenation of the metric
|
||||
## name, and the attribute name, separated by the given delimiter.
|
||||
# delimiter = "_"
|
||||
|
||||
## List of servers exposing jolokia read service
|
||||
[[inputs.jolokia.servers]]
|
||||
@@ -174,7 +148,7 @@ func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
|
||||
|
||||
func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, error) {
|
||||
var jolokiaUrl *url.URL
|
||||
context := j.Context // Usually "/jolokia/"
|
||||
context := j.Context // Usually "/jolokia"
|
||||
|
||||
// Create bodyContent
|
||||
bodyContent := map[string]interface{}{
|
||||
@@ -246,26 +220,7 @@ func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, e
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (j *Jolokia) extractValues(measurement string, value interface{}, fields map[string]interface{}) {
|
||||
if mapValues, ok := value.(map[string]interface{}); ok {
|
||||
for k2, v2 := range mapValues {
|
||||
j.extractValues(measurement+j.Delimiter+k2, v2, fields)
|
||||
}
|
||||
} else {
|
||||
fields[measurement] = value
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
if j.jClient == nil {
|
||||
tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration}
|
||||
j.jClient = &JolokiaClientImpl{&http.Client{
|
||||
Transport: tr,
|
||||
Timeout: j.ClientTimeout.Duration,
|
||||
}}
|
||||
}
|
||||
|
||||
servers := j.Servers
|
||||
metrics := j.Metrics
|
||||
tags := make(map[string]string)
|
||||
@@ -289,8 +244,23 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||
if err != nil {
|
||||
fmt.Printf("Error handling response: %s\n", err)
|
||||
} else {
|
||||
|
||||
if values, ok := out["value"]; ok {
|
||||
j.extractValues(measurement, values, fields)
|
||||
switch t := values.(type) {
|
||||
case map[string]interface{}:
|
||||
for k, v := range t {
|
||||
switch t2 := v.(type) {
|
||||
case map[string]interface{}:
|
||||
for k2, v2 := range t2 {
|
||||
fields[measurement+"_"+k+"_"+k2] = v2
|
||||
}
|
||||
case interface{}:
|
||||
fields[measurement+"_"+k] = t2
|
||||
}
|
||||
}
|
||||
case interface{}:
|
||||
fields[measurement] = t
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Missing key 'value' in output response\n")
|
||||
}
|
||||
@@ -306,10 +276,11 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
func init() {
|
||||
inputs.Add("jolokia", func() telegraf.Input {
|
||||
return &Jolokia{
|
||||
ResponseHeaderTimeout: DefaultResponseHeaderTimeout,
|
||||
ClientTimeout: DefaultClientTimeout,
|
||||
Delimiter: "_",
|
||||
tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
|
||||
client := &http.Client{
|
||||
Transport: tr,
|
||||
Timeout: time.Duration(4 * time.Second),
|
||||
}
|
||||
return &Jolokia{jClient: &JolokiaClientImpl{client: client}}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -12,37 +12,6 @@ import (
|
||||
_ "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const validThreeLevelMultiValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
"mbean":"java.lang:type=*",
|
||||
"type":"read"
|
||||
},
|
||||
"value":{
|
||||
"java.lang:type=Memory":{
|
||||
"ObjectPendingFinalizationCount":0,
|
||||
"Verbose":false,
|
||||
"HeapMemoryUsage":{
|
||||
"init":134217728,
|
||||
"committed":173015040,
|
||||
"max":1908932608,
|
||||
"used":16840016
|
||||
},
|
||||
"NonHeapMemoryUsage":{
|
||||
"init":2555904,
|
||||
"committed":51380224,
|
||||
"max":-1,
|
||||
"used":49944048
|
||||
},
|
||||
"ObjectName":{
|
||||
"objectName":"java.lang:type=Memory"
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp":1446129191,
|
||||
"status":200
|
||||
}`
|
||||
|
||||
const validMultiValueJSON = `
|
||||
{
|
||||
"request":{
|
||||
@@ -104,10 +73,9 @@ func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error
|
||||
// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client
|
||||
func genJolokiaClientStub(response string, statusCode int, servers []Server, metrics []Metric) *Jolokia {
|
||||
return &Jolokia{
|
||||
jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
|
||||
Servers: servers,
|
||||
Metrics: metrics,
|
||||
Delimiter: "_",
|
||||
jClient: jolokiaClientStub{responseBody: response, statusCode: statusCode},
|
||||
Servers: servers,
|
||||
Metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,38 +103,6 @@ func TestHttpJsonMultiValue(t *testing.T) {
|
||||
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
|
||||
jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric})
|
||||
|
||||
var acc testutil.Accumulator
|
||||
err := jolokia.Gather(&acc)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(acc.Metrics))
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"heap_memory_usage_java.lang:type=Memory_ObjectPendingFinalizationCount": 0.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_Verbose": false,
|
||||
"heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_init": 134217728.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_max": 1908932608.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_used": 16840016.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_HeapMemoryUsage_committed": 173015040.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_init": 2555904.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_committed": 51380224.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_max": -1.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_NonHeapMemoryUsage_used": 49944048.0,
|
||||
"heap_memory_usage_java.lang:type=Memory_ObjectName_objectName": "java.lang:type=Memory",
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"jolokia_host": "127.0.0.1",
|
||||
"jolokia_port": "8080",
|
||||
"jolokia_name": "as1",
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
|
||||
}
|
||||
|
||||
// Test that the proper values are ignored or collected
|
||||
func TestHttpJsonOn404(t *testing.T) {
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ type Kafka struct {
|
||||
// channel for all incoming kafka messages
|
||||
in <-chan *sarama.ConsumerMessage
|
||||
// channel for all kafka consumer errors
|
||||
errs <-chan error
|
||||
errs <-chan *sarama.ConsumerError
|
||||
done chan struct{}
|
||||
|
||||
// keep the accumulator internally:
|
||||
|
||||
@@ -27,7 +27,7 @@ func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
|
||||
Offset: "oldest",
|
||||
in: in,
|
||||
doNotCommitMsgs: true,
|
||||
errs: make(chan error, 1000),
|
||||
errs: make(chan *sarama.ConsumerError, 1000),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
return &k, in
|
||||
|
||||
@@ -40,11 +40,8 @@ regex patterns.
|
||||
## Grok Parser
|
||||
|
||||
The grok parser uses a slightly modified version of logstash "grok" patterns,
with the format
with the format `%{<capture_syntax>[:<semantic_name>][:<modifier>]}`

```
%{<capture_syntax>[:<semantic_name>][:<modifier>]}
```

Telegraf has many of its own
[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
@@ -95,3 +92,6 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
CUSTOM time layouts must be within quotes and be the representation of the
"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
See https://golang.org/pkg/time/#Parse for more details.
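To make the capture syntax above concrete, here is a minimal sketch (not part of this changeset) that runs a hypothetical custom pattern through the same grok `Parser` API exercised by the tests later in this diff. The pattern, measurement name, and log line are invented for illustration, and it assumes the stock `WORD` and `NUMBER` built-in patterns are available:

```go
package grok

import (
	"fmt"
	"testing"
)

// Hypothetical example: a word captured as a tag and a number converted to a
// float field, using the %{<pattern>[:<name>][:<modifier>]} syntax.
func TestCustomPatternSketch(t *testing.T) {
	p := &Parser{
		Measurement: "example_log",
		Patterns:    []string{`%{WORD:verb:tag} %{NUMBER:response_time:float}`},
	}
	if err := p.Compile(); err != nil {
		t.Fatal(err)
	}

	m, err := p.ParseLine("GET 1.25")
	if err != nil {
		t.Fatal(err)
	}
	// Expected: measurement "example_log", tag verb=GET, field response_time=1.25
	fmt.Println(m.Name(), m.Tags(), m.Fields())
}
```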
@@ -82,46 +82,6 @@ func TestMeasurementName(t *testing.T) {
|
||||
assert.Equal(t, "my_web_log", m.Name())
|
||||
}
|
||||
|
||||
func TestCLF_IPv6(t *testing.T) {
|
||||
p := &Parser{
|
||||
Measurement: "my_web_log",
|
||||
Patterns: []string{"%{COMMON_LOG_FORMAT}"},
|
||||
}
|
||||
assert.NoError(t, p.Compile())
|
||||
|
||||
m, err := p.ParseLine(`2001:0db8:85a3:0000:0000:8a2e:0370:7334 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
|
||||
require.NotNil(t, m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"resp_bytes": int64(2326),
|
||||
"auth": "frank",
|
||||
"client_ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
|
||||
"http_version": float64(1.0),
|
||||
"ident": "user-identifier",
|
||||
"request": "/apache_pb.gif",
|
||||
},
|
||||
m.Fields())
|
||||
assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
|
||||
assert.Equal(t, "my_web_log", m.Name())
|
||||
|
||||
m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)
|
||||
require.NotNil(t, m)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
map[string]interface{}{
|
||||
"resp_bytes": int64(2326),
|
||||
"auth": "frank",
|
||||
"client_ip": "::1",
|
||||
"http_version": float64(1.0),
|
||||
"ident": "user-identifier",
|
||||
"request": "/apache_pb.gif",
|
||||
},
|
||||
m.Fields())
|
||||
assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags())
|
||||
assert.Equal(t, "my_web_log", m.Name())
|
||||
}
|
||||
|
||||
func TestCustomInfluxdbHttpd(t *testing.T) {
|
||||
p := &Parser{
|
||||
Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`},
|
||||
|
||||
@@ -56,7 +56,7 @@ EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE}
|
||||
NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+
|
||||
NGUSER %{NGUSERNAME}
|
||||
# Wider-ranging client IP matching
|
||||
CLIENT (?:%{IPV6}|%{IPV4}|%{HOSTNAME}|%{HOSTPORT})
|
||||
CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1)
|
||||
|
||||
##
|
||||
## COMMON LOG PATTERNS
|
||||
|
||||
@@ -130,6 +130,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error {
|
||||
|
||||
sess, err := mgo.DialWithInfo(dialInfo)
|
||||
if err != nil {
|
||||
fmt.Printf("error dialing over ssl, %s\n", err.Error())
|
||||
return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error())
|
||||
}
|
||||
server.Session = sess
|
||||
|
||||
@@ -21,6 +21,9 @@ type DbData struct {
|
||||
}
|
||||
|
||||
func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
|
||||
if statLine.NodeType != "" && statLine.NodeType != "UNK" {
|
||||
tags["state"] = statLine.NodeType
|
||||
}
|
||||
return &MongodbData{
|
||||
StatLine: statLine,
|
||||
Tags: tags,
|
||||
@@ -58,7 +61,6 @@ var DefaultReplStats = map[string]string{
|
||||
"repl_getmores_per_sec": "GetMoreR",
|
||||
"repl_commands_per_sec": "CommandR",
|
||||
"member_status": "NodeType",
|
||||
"state": "NodeState",
|
||||
"repl_lag": "ReplLag",
|
||||
}
|
||||
|
||||
|
||||
@@ -95,12 +95,12 @@ func TestStateTag(t *testing.T) {
|
||||
Insert: 0,
|
||||
Query: 0,
|
||||
NodeType: "PRI",
|
||||
NodeState: "PRIMARY",
|
||||
},
|
||||
tags,
|
||||
)
|
||||
|
||||
stateTags := make(map[string]string)
|
||||
stateTags["state"] = "PRI"
|
||||
|
||||
var acc testutil.Accumulator
|
||||
|
||||
@@ -115,7 +115,6 @@ func TestStateTag(t *testing.T) {
|
||||
"getmores_per_sec": int64(0),
|
||||
"inserts_per_sec": int64(0),
|
||||
"member_status": "PRI",
|
||||
"state": "PRIMARY",
|
||||
"net_in_bytes": int64(0),
|
||||
"net_out_bytes": int64(0),
|
||||
"open_connections": int64(0),
|
||||
|
||||
@@ -40,14 +40,15 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error
|
||||
return err
|
||||
}
|
||||
result_repl := &ReplSetStatus{}
|
||||
// ignore error because it simply indicates that the db is not a member
|
||||
// in a replica set, which is fine.
|
||||
_ = s.Session.DB("admin").Run(bson.D{
|
||||
err = s.Session.DB("admin").Run(bson.D{
|
||||
{
|
||||
Name: "replSetGetStatus",
|
||||
Value: 1,
|
||||
},
|
||||
}, result_repl)
|
||||
if err != nil {
|
||||
log.Println("E! Not gathering replica set status, member not in replica set (" + err.Error() + ")")
|
||||
}
|
||||
|
||||
jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count()
|
||||
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/mgo.v2/bson"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -103,10 +105,9 @@ type ReplSetStatus struct {
|
||||
|
||||
// ReplSetMember stores information related to a replica set member
|
||||
type ReplSetMember struct {
|
||||
Name string `bson:"name"`
|
||||
State int64 `bson:"state"`
|
||||
StateStr string `bson:"stateStr"`
|
||||
OptimeDate time.Time `bson:"optimeDate"`
|
||||
Name string `bson:"name"`
|
||||
State int64 `bson:"state"`
|
||||
OptimeDate *bson.MongoTimestamp `bson:"optimeDate"`
|
||||
}
|
||||
|
||||
// WiredTiger stores information related to the WiredTiger storage engine.
|
||||
@@ -419,7 +420,6 @@ type StatLine struct {
|
||||
NumConnections int64
|
||||
ReplSetName string
|
||||
NodeType string
|
||||
NodeState string
|
||||
|
||||
// Cluster fields
|
||||
JumboChunksCount int64
|
||||
@@ -566,8 +566,6 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||
returnVal.NodeType = "PRI"
|
||||
} else if newStat.Repl.Secondary.(bool) {
|
||||
returnVal.NodeType = "SEC"
|
||||
} else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) {
|
||||
returnVal.NodeType = "ARB"
|
||||
} else {
|
||||
returnVal.NodeType = "UNK"
|
||||
}
|
||||
@@ -694,8 +692,6 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||
me := ReplSetMember{}
|
||||
for _, member := range newReplStat.Members {
|
||||
if member.Name == myName {
|
||||
// Store my state string
|
||||
returnVal.NodeState = member.StateStr
|
||||
if member.State == 1 {
|
||||
// I'm the master
|
||||
returnVal.ReplLag = 0
|
||||
@@ -710,9 +706,9 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec
|
||||
}
|
||||
}
|
||||
|
||||
if me.State == 2 {
|
||||
// OptimeDate.Unix() type is int64
|
||||
lag := master.OptimeDate.Unix() - me.OptimeDate.Unix()
|
||||
if me.OptimeDate != nil && master.OptimeDate != nil && me.State == 2 {
|
||||
// MongoTimestamp type is int64 where the first 32bits are the unix timestamp
|
||||
lag := int64(*master.OptimeDate>>32 - *me.OptimeDate>>32)
|
||||
if lag < 0 {
|
||||
returnVal.ReplLag = 0
|
||||
} else {
|
||||
|
||||
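The replica-lag hunk above relies on the layout of `bson.MongoTimestamp`: an int64 whose upper 32 bits are the unix seconds (per the comment in the change). A small self-contained sketch of that calculation, with made-up optime values, assuming that layout:

```go
package main

import "fmt"

// mongoTimestampSeconds extracts the unix-seconds half of a MongoDB timestamp
// value (upper 32 bits of the int64); the lower 32 bits are an ordinal counter
// and are ignored for lag purposes.
func mongoTimestampSeconds(ts int64) int64 {
	return ts >> 32
}

func main() {
	// Hypothetical optimes: the master is two seconds ahead of the secondary.
	master := int64(1475767451) << 32
	secondary := int64(1475767449)<<32 | 7

	lag := mongoTimestampSeconds(master) - mongoTimestampSeconds(secondary)
	if lag < 0 {
		lag = 0 // clamp, as the plugin does, if optimes arrive out of order
	}
	fmt.Println("repl_lag seconds:", lag) // prints: repl_lag seconds: 2
}
```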
@@ -25,8 +25,8 @@ This plugin gathers the statistic data from MySQL server
|
||||
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
## e.g.
|
||||
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
|
||||
## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
|
||||
## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
|
||||
## db_user@tcp(127.0.0.1:3306)/?tls=false
|
||||
#
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
|
||||
@@ -41,8 +41,8 @@ var sampleConfig = `
|
||||
## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
|
||||
## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
## e.g.
|
||||
## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
|
||||
## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
|
||||
## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
|
||||
## db_user@tcp(127.0.0.1:3306)/?tls=false
|
||||
#
|
||||
## If no servers are specified, then localhost is used as the host.
|
||||
servers = ["tcp(127.0.0.1:3306)/"]
|
||||
@@ -828,13 +828,6 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
|
||||
}
|
||||
|
||||
fields["queries"] = i
|
||||
case "Questions":
|
||||
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fields["questions"] = i
|
||||
case "Slow_queries":
|
||||
i, err := strconv.ParseInt(string(val.([]byte)), 10, 64)
|
||||
if err != nil {
|
||||
|
||||
@@ -6,27 +6,6 @@ It can also check response text.
|
||||
### Configuration:
|
||||
|
||||
```
|
||||
[[inputs.net_response]]
|
||||
## Protocol, must be "tcp" or "udp"
|
||||
## NOTE: because the "udp" protocol does not respond to requests, it requires
|
||||
## a send/expect string pair (see below).
|
||||
protocol = "tcp"
|
||||
## Server address (default localhost)
|
||||
address = "localhost:80"
|
||||
## Set timeout
|
||||
timeout = "1s"
|
||||
|
||||
## Set read timeout (only used if expecting a response)
|
||||
read_timeout = "1s"
|
||||
|
||||
## The following options are required for UDP checks. For TCP, they are
|
||||
## optional. The plugin will send the given string to the server and then
|
||||
## expect to receive the given 'expect' string back.
|
||||
## string sent to the server
|
||||
# send = "ssh"
|
||||
## expected string in answer
|
||||
# expect = "ssh"
|
||||
|
||||
[[inputs.net_response]]
|
||||
protocol = "tcp"
|
||||
address = ":80"
|
||||
@@ -51,8 +30,6 @@ It can also check response text.
|
||||
protocol = "udp"
|
||||
address = "localhost:161"
|
||||
timeout = "2s"
|
||||
send = "hello server"
|
||||
expect = "hello client"
|
||||
```
|
||||
|
||||
### Measurements & Fields:
|
||||
|
||||
@@ -29,24 +29,18 @@ func (_ *NetResponse) Description() string {
|
||||
|
||||
var sampleConfig = `
|
||||
## Protocol, must be "tcp" or "udp"
|
||||
## NOTE: because the "udp" protocol does not respond to requests, it requires
|
||||
## a send/expect string pair (see below).
|
||||
protocol = "tcp"
|
||||
## Server address (default localhost)
|
||||
address = "localhost:80"
|
||||
address = "github.com:80"
|
||||
## Set timeout
|
||||
timeout = "1s"
|
||||
|
||||
## Optional string sent to the server
|
||||
# send = "ssh"
|
||||
## Optional expected string in answer
|
||||
# expect = "ssh"
|
||||
## Set read timeout (only used if expecting a response)
|
||||
read_timeout = "1s"
|
||||
|
||||
## The following options are required for UDP checks. For TCP, they are
|
||||
## optional. The plugin will send the given string to the server and then
|
||||
## expect to receive the given 'expect' string back.
|
||||
## string sent to the server
|
||||
# send = "ssh"
|
||||
## expected string in answer
|
||||
# expect = "ssh"
|
||||
`
|
||||
|
||||
func (_ *NetResponse) SampleConfig() string {
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
This input plugin measures the round-trip
|
||||
## Windows:
|
||||
### Configuration:
|
||||
### Configration:
|
||||
```
|
||||
## urls to ping
|
||||
urls = ["www.google.com"] # required
|
||||
@@ -33,4 +33,4 @@ This input plugin will measures the round-trip
|
||||
```
|
||||
* Plugin: ping, Collection 1
|
||||
ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000
|
||||
```
|
||||
```
|
||||
@@ -84,7 +84,7 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
|
||||
strings.TrimSpace(out) + ", " + err.Error())
|
||||
}
|
||||
tags := map[string]string{"url": u}
|
||||
trans, rec, avg, stddev, err := processPingOutput(out)
|
||||
trans, rec, avg, err := processPingOutput(out)
|
||||
if err != nil {
|
||||
// fatal error
|
||||
errorChannel <- err
|
||||
@@ -100,9 +100,6 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
|
||||
if avg > 0 {
|
||||
fields["average_response_ms"] = avg
|
||||
}
|
||||
if stddev > 0 {
|
||||
fields["standard_deviation_ms"] = stddev
|
||||
}
|
||||
acc.AddFields("ping", fields, tags)
|
||||
}(url)
|
||||
}
|
||||
@@ -169,9 +166,9 @@ func (p *Ping) args(url string) []string {
|
||||
// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms
|
||||
//
|
||||
// It returns (<transmitted packets>, <received packets>, <average response>)
|
||||
func processPingOutput(out string) (int, int, float64, float64, error) {
|
||||
func processPingOutput(out string) (int, int, float64, error) {
|
||||
var trans, recv int
|
||||
var avg, stddev float64
|
||||
var avg float64
|
||||
// Set this error to nil if we find a 'transmitted' line
|
||||
err := errors.New("Fatal error processing ping output")
|
||||
lines := strings.Split(out, "\n")
|
||||
@@ -183,23 +180,22 @@ func processPingOutput(out string) (int, int, float64, float64, error) {
|
||||
// Transmitted packets
|
||||
trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0])
|
||||
if err != nil {
|
||||
return trans, recv, avg, stddev, err
|
||||
return trans, recv, avg, err
|
||||
}
|
||||
// Received packets
|
||||
recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0])
|
||||
if err != nil {
|
||||
return trans, recv, avg, stddev, err
|
||||
return trans, recv, avg, err
|
||||
}
|
||||
} else if strings.Contains(line, "min/avg/max") {
|
||||
stats := strings.Split(line, " ")[3]
|
||||
stats := strings.Split(line, " = ")[1]
|
||||
avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64)
|
||||
stddev, err = strconv.ParseFloat(strings.Split(stats, "/")[3], 64)
|
||||
if err != nil {
|
||||
return trans, recv, avg, stddev, err
|
||||
return trans, recv, avg, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return trans, recv, avg, stddev, err
|
||||
return trans, recv, avg, err
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -48,25 +48,23 @@ ping: -i interval too short: Operation not permitted
|
||||
|
||||
// Test that ping command output is processed properly
|
||||
func TestProcessPingOutput(t *testing.T) {
|
||||
trans, rec, avg, stddev, err := processPingOutput(bsdPingOutput)
|
||||
trans, rec, avg, err := processPingOutput(bsdPingOutput)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 5, trans, "5 packets were transmitted")
|
||||
assert.Equal(t, 5, rec, "5 packets were transmitted")
|
||||
assert.InDelta(t, 20.224, avg, 0.001)
|
||||
assert.InDelta(t, 4.076, stddev, 0.001)
|
||||
|
||||
trans, rec, avg, stddev, err = processPingOutput(linuxPingOutput)
|
||||
trans, rec, avg, err = processPingOutput(linuxPingOutput)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 5, trans, "5 packets were transmitted")
|
||||
assert.Equal(t, 5, rec, "5 packets were transmitted")
|
||||
assert.InDelta(t, 43.628, avg, 0.001)
|
||||
assert.InDelta(t, 5.325, stddev, 0.001)
|
||||
}
|
||||
|
||||
// Test that processPingOutput returns an error when 'ping' fails to run, such
|
||||
// as when an invalid argument is provided
|
||||
func TestErrorProcessPingOutput(t *testing.T) {
|
||||
_, _, _, _, err := processPingOutput(fatalPingOutput)
|
||||
_, _, _, err := processPingOutput(fatalPingOutput)
|
||||
assert.Error(t, err, "Error was expected from processPingOutput")
|
||||
}
|
||||
|
||||
@@ -147,11 +145,10 @@ func TestPingGather(t *testing.T) {
|
||||
p.Gather(&acc)
|
||||
tags := map[string]string{"url": "www.google.com"}
|
||||
fields := map[string]interface{}{
|
||||
"packets_transmitted": 5,
|
||||
"packets_received": 5,
|
||||
"percent_packet_loss": 0.0,
|
||||
"average_response_ms": 43.628,
|
||||
"standard_deviation_ms": 5.325,
|
||||
"packets_transmitted": 5,
|
||||
"packets_received": 5,
|
||||
"percent_packet_loss": 0.0,
|
||||
"average_response_ms": 43.628,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
|
||||
|
||||
@@ -185,11 +182,10 @@ func TestLossyPingGather(t *testing.T) {
|
||||
p.Gather(&acc)
|
||||
tags := map[string]string{"url": "www.google.com"}
|
||||
fields := map[string]interface{}{
|
||||
"packets_transmitted": 5,
|
||||
"packets_received": 3,
|
||||
"percent_packet_loss": 40.0,
|
||||
"average_response_ms": 44.033,
|
||||
"standard_deviation_ms": 5.325,
|
||||
"packets_transmitted": 5,
|
||||
"packets_received": 3,
|
||||
"percent_packet_loss": 40.0,
|
||||
"average_response_ms": 44.033,
|
||||
}
|
||||
acc.AssertContainsTaggedFields(t, "ping", fields, tags)
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ type Procstat struct {
Prefix string
ProcessName string
User string
PidTag bool

// pidmap maps a pid to a process object, so we don't recreate every gather
pidmap map[int32]*process.Process
@@ -54,8 +53,6 @@ var sampleConfig = `
prefix = ""
## comment this out if you want raw cpu_time stats
fielddrop = ["cpu_time_*"]
## This is optional; moves pid into a tag instead of a field
pid_tag = false
`

func (_ *Procstat) SampleConfig() string {
@@ -73,9 +70,6 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error {
p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
} else {
for pid, proc := range p.pidmap {
if p.PidTag {
p.tagmap[pid]["pid"] = fmt.Sprint(pid)
}
p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
p.pushMetrics()
}

@@ -48,12 +48,7 @@ func (p *SpecProcessor) pushMetrics() {
if p.Prefix != "" {
prefix = p.Prefix + "_"
}
fields := map[string]interface{}{}

//If pid is not present as a tag, include it as a field.
if _, pidInTags := p.tags["pid"]; !pidInTags {
fields["pid"] = p.pid
}
fields := map[string]interface{}{"pid": p.pid}

numThreads, err := p.proc.NumThreads()
if err == nil {
@@ -13,17 +13,6 @@ Example for Kubernetes apiserver
urls = ["http://my-kube-apiserver:8080/metrics"]
```

Specify a 10 second timeout for slower/over-loaded clients
```toml
# Get all metrics from Kube-apiserver
[[inputs.prometheus]]
# An array of urls to scrape metrics from.
urls = ["http://my-kube-apiserver:8080/metrics"]

# Specify timeout duration for slower prometheus clients (default is 3s)
response_timeout = "10s"
```

You can use more complex configuration
to filter and some tags

@@ -21,8 +21,6 @@ type Prometheus struct {
// Bearer Token authorization file path
BearerToken string `toml:"bearer_token"`

ResponseTimeout internal.Duration `toml:"response_timeout"`

// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
@@ -40,9 +38,6 @@ var sampleConfig = `
## Use bearer token for authorization
# bearer_token = /path/to/bearer/token

## Specify timeout duration for slower prometheus clients (default is 3s)
# response_timeout = "3s"

## Optional SSL Config
# ssl_ca = /path/to/cafile
# ssl_cert = /path/to/certfile
@@ -110,7 +105,7 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
TLSClientConfig: tlsCfg,
ResponseHeaderTimeout: p.ResponseTimeout.Duration,
ResponseHeaderTimeout: time.Duration(3 * time.Second),
DisableKeepAlives: true,
}

@@ -153,6 +148,6 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error {

func init() {
inputs.Add("prometheus", func() telegraf.Input {
return &Prometheus{ResponseTimeout: internal.Duration{Duration: time.Second * 3}}
return &Prometheus{}
})
}
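Aside: the hunk above trades a configurable `ResponseTimeout` for a hard-coded 3 seconds. As a minimal, hedged sketch (function and variable names here are illustrative, not taken from the diff), a configurable response-header timeout is typically wired into Go's `net/http` transport like this:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// newClient builds an HTTP client whose response-header wait is configurable;
// a zero value falls back to the same 3s default the hard-coded branch uses.
func newClient(timeout time.Duration) *http.Client {
	if timeout == 0 {
		timeout = 3 * time.Second
	}
	return &http.Client{
		Transport: &http.Transport{
			ResponseHeaderTimeout: timeout,
			DisableKeepAlives:     true,
		},
	}
}

func main() {
	c := newClient(10 * time.Second)
	fmt.Println(c.Transport.(*http.Transport).ResponseHeaderTimeout)
}
```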
@@ -30,5 +30,5 @@
last_run: 1444936531
cron: 0.000584
version:
config: "environment:d6018ce"
puppet: "3.7.5"
config: 1444936521
puppet: "3.7.5"
@@ -68,8 +68,8 @@ type time struct {
}

type version struct {
ConfigString string `yaml:"config"`
Puppet string `yaml:"puppet"`
Config int64 `yaml:"config"`
Puppet string `yaml:"puppet"`
}

// SampleConfig returns sample configuration message

@@ -28,7 +28,7 @@ func TestGather(t *testing.T) {
"resources_outofsync": int64(0),
"changes_total": int64(0),
"time_lastrun": int64(1444936531),
"version_configstring": "environment:d6018ce",
"version_config": int64(1444936521),
"time_user": float64(0.004331),
"time_schedule": float64(0.001123),
"time_filebucket": float64(0.000353),
@@ -155,12 +155,17 @@ var sampleConfig = `

## Optional request timeouts
##
## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
## for a server's response headers after fully writing the request.
## ResponseHeaderTimeout, if non-zero, specifies the amount of
## time to wait for a server's response headers after fully
## writing the request (including its body, if any). This
## time does not include the time to read the response body.
## See http.Transport.ResponseHeaderTimeout
# header_timeout = "3s"
##
## client_timeout specifies a time limit for requests made by this client.
## Includes connection time, any redirects, and reading the response body.
## Timeout specifies a time limit for requests made by this
## Client. The timeout includes connection time, any
## redirects, and reading the response body.
## See http.Client.Timeout
# client_timeout = "4s"

## A list of nodes to pull metrics about. If not specified, metrics for
@@ -158,7 +158,7 @@ func gatherInfoOutput(
tags map[string]string,
) error {
var section string
var keyspace_hits, keyspace_misses int64
var keyspace_hits, keyspace_misses uint64 = 0, 0

scanner := bufio.NewScanner(rdr)
fields := make(map[string]interface{})
@@ -210,8 +210,8 @@ func gatherInfoOutput(

val := strings.TrimSpace(parts[1])

// Try parsing as int
if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
// Try parsing as a uint
if ival, err := strconv.ParseUint(val, 10, 64); err == nil {
switch name {
case "keyspace_hits":
keyspace_hits = ival
@@ -219,12 +219,18 @@ func gatherInfoOutput(
keyspace_misses = ival
case "rdb_last_save_time":
// influxdb can't calculate this, so we have to do it
fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival
fields["rdb_last_save_time_elapsed"] = uint64(time.Now().Unix()) - ival
}
fields[metric] = ival
continue
}

// Try parsing as an int
if ival, err := strconv.ParseInt(val, 10, 64); err == nil {
fields[metric] = ival
continue
}

// Try parsing as a float
if fval, err := strconv.ParseFloat(val, 64); err == nil {
fields[metric] = fval
@@ -269,7 +275,7 @@ func gatherKeyspaceLine(
dbparts := strings.Split(line, ",")
for _, dbp := range dbparts {
kv := strings.Split(dbp, "=")
ival, err := strconv.ParseInt(kv[1], 10, 64)
ival, err := strconv.ParseUint(kv[1], 10, 64)
if err == nil {
fields[kv[0]] = ival
}
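For context, the keyspace entry that gatherKeyspaceLine splits comes from the `# Keyspace` section of Redis `INFO` output; the values below match what the keyspace test asserts, though exactly how much of the line (with or without the leading `db0:` label) reaches this function is not visible in the hunk:

```
db0:keys=2,expires=0,avg_ttl=0
```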
@@ -39,53 +39,53 @@ func TestRedis_ParseMetrics(t *testing.T) {

tags = map[string]string{"host": "redis.net", "replication_role": "master"}
fields := map[string]interface{}{
"uptime": int64(238),
"lru_clock": int64(2364819),
"clients": int64(1),
"client_longest_output_list": int64(0),
"client_biggest_input_buf": int64(0),
"blocked_clients": int64(0),
"used_memory": int64(1003936),
"used_memory_rss": int64(811008),
"used_memory_peak": int64(1003936),
"used_memory_lua": int64(33792),
"uptime": uint64(238),
"lru_clock": uint64(2364819),
"clients": uint64(1),
"client_longest_output_list": uint64(0),
"client_biggest_input_buf": uint64(0),
"blocked_clients": uint64(0),
"used_memory": uint64(1003936),
"used_memory_rss": uint64(811008),
"used_memory_peak": uint64(1003936),
"used_memory_lua": uint64(33792),
"mem_fragmentation_ratio": float64(0.81),
"loading": int64(0),
"rdb_changes_since_last_save": int64(0),
"rdb_bgsave_in_progress": int64(0),
"rdb_last_save_time": int64(1428427941),
"loading": uint64(0),
"rdb_changes_since_last_save": uint64(0),
"rdb_bgsave_in_progress": uint64(0),
"rdb_last_save_time": uint64(1428427941),
"rdb_last_bgsave_status": "ok",
"rdb_last_bgsave_time_sec": int64(-1),
"rdb_current_bgsave_time_sec": int64(-1),
"aof_enabled": int64(0),
"aof_rewrite_in_progress": int64(0),
"aof_rewrite_scheduled": int64(0),
"aof_enabled": uint64(0),
"aof_rewrite_in_progress": uint64(0),
"aof_rewrite_scheduled": uint64(0),
"aof_last_rewrite_time_sec": int64(-1),
"aof_current_rewrite_time_sec": int64(-1),
"aof_last_bgrewrite_status": "ok",
"aof_last_write_status": "ok",
"total_connections_received": int64(2),
"total_commands_processed": int64(1),
"instantaneous_ops_per_sec": int64(0),
"total_connections_received": uint64(2),
"total_commands_processed": uint64(1),
"instantaneous_ops_per_sec": uint64(0),
"instantaneous_input_kbps": float64(876.16),
"instantaneous_output_kbps": float64(3010.23),
"rejected_connections": int64(0),
"sync_full": int64(0),
"sync_partial_ok": int64(0),
"sync_partial_err": int64(0),
"expired_keys": int64(0),
"evicted_keys": int64(0),
"keyspace_hits": int64(1),
"keyspace_misses": int64(1),
"pubsub_channels": int64(0),
"pubsub_patterns": int64(0),
"latest_fork_usec": int64(0),
"connected_slaves": int64(0),
"master_repl_offset": int64(0),
"repl_backlog_active": int64(0),
"repl_backlog_size": int64(1048576),
"repl_backlog_first_byte_offset": int64(0),
"repl_backlog_histlen": int64(0),
"rejected_connections": uint64(0),
"sync_full": uint64(0),
"sync_partial_ok": uint64(0),
"sync_partial_err": uint64(0),
"expired_keys": uint64(0),
"evicted_keys": uint64(0),
"keyspace_hits": uint64(1),
"keyspace_misses": uint64(1),
"pubsub_channels": uint64(0),
"pubsub_patterns": uint64(0),
"latest_fork_usec": uint64(0),
"connected_slaves": uint64(0),
"master_repl_offset": uint64(0),
"repl_backlog_active": uint64(0),
"repl_backlog_size": uint64(1048576),
"repl_backlog_first_byte_offset": uint64(0),
"repl_backlog_histlen": uint64(0),
"used_cpu_sys": float64(0.14),
"used_cpu_user": float64(0.05),
"used_cpu_sys_children": float64(0.00),
@@ -102,15 +102,15 @@ func TestRedis_ParseMetrics(t *testing.T) {
}
}
assert.InDelta(t,
time.Now().Unix()-fields["rdb_last_save_time"].(int64),
fields["rdb_last_save_time_elapsed"].(int64),
uint64(time.Now().Unix())-fields["rdb_last_save_time"].(uint64),
fields["rdb_last_save_time_elapsed"].(uint64),
2) // allow for 2 seconds worth of offset

keyspaceTags := map[string]string{"host": "redis.net", "replication_role": "master", "database": "db0"}
keyspaceFields := map[string]interface{}{
"avg_ttl": int64(0),
"expires": int64(0),
"keys": int64(2),
"avg_ttl": uint64(0),
"expires": uint64(0),
"keys": uint64(2),
}
acc.AssertContainsTaggedFields(t, "redis", fields, tags)
acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, keyspaceTags)
@@ -146,13 +146,13 @@ func (s *Snmp) init() error {

for i := range s.Tables {
if err := s.Tables[i].init(); err != nil {
return Errorf(err, "initializing table %s", s.Tables[i].Name)
return err
}
}

for i := range s.Fields {
if err := s.Fields[i].init(); err != nil {
return Errorf(err, "initializing field %s", s.Fields[i].Name)
return err
}
}

@@ -192,7 +192,7 @@ func (t *Table) init() error {
// initialize all the nested fields
for i := range t.Fields {
if err := t.Fields[i].init(); err != nil {
return Errorf(err, "initializing field %s", t.Fields[i].Name)
return err
}
}

@@ -210,7 +210,7 @@ func (t *Table) initBuild() error {

_, _, oidText, fields, err := snmpTable(t.Oid)
if err != nil {
return err
return Errorf(err, "initializing table %s", t.Oid)
}
if t.Name == "" {
t.Name = oidText
@@ -252,7 +252,7 @@ func (f *Field) init() error {

_, oidNum, oidText, conversion, err := snmpTranslate(f.Oid)
if err != nil {
return Errorf(err, "translating")
return Errorf(err, "translating %s", f.Oid)
}
f.Oid = oidNum
if f.Name == "" {
@@ -358,7 +358,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// Now is the real tables.
for _, t := range s.Tables {
if err := s.gatherTable(acc, gs, t, topTags, true); err != nil {
acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name))
acc.AddError(Errorf(err, "agent %s", agent))
}
}
}
@@ -406,7 +406,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
}

if len(f.Oid) == 0 {
return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name)
return nil, fmt.Errorf("cannot have empty OID")
}
var oid string
if f.Oid[0] == '.' {
@@ -426,12 +426,12 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
// empty string. This results in all the non-table fields sharing the same
// index, and being added on the same row.
if pkt, err := gs.Get([]string{oid}); err != nil {
return nil, Errorf(err, "performing get on field %s", f.Name)
return nil, Errorf(err, "performing get")
} else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance {
ent := pkt.Variables[0]
fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
return nil, Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
return nil, Errorf(err, "converting %q", ent.Value)
}
if fvs, ok := fv.(string); !ok || fvs != "" {
ifv[""] = fv
@@ -454,7 +454,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {

fv, err := fieldConvert(f.Conversion, ent.Value)
if err != nil {
return Errorf(err, "converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name)
return Errorf(err, "converting %q", ent.Value)
}
if fvs, ok := fv.(string); !ok || fvs != "" {
ifv[idx] = fv
@@ -463,7 +463,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
})
if err != nil {
if _, ok := err.(NestedError); !ok {
return nil, Errorf(err, "performing bulk walk for field %s", f.Name)
return nil, Errorf(err, "performing bulk walk")
}
}
}
@@ -7,19 +7,14 @@
[[inputs.statsd]]
## Address and port to host UDP listener on
service_address = ":8125"

## The following configuration options control when telegraf clears it's cache
## of previous values. If set to false, then telegraf will only clear it's
## cache when the daemon is restarted.
## Reset gauges every interval (default=true)
delete_gauges = true
## Reset counters every interval (default=true)
delete_counters = true
## Reset sets every interval (default=true)
delete_sets = true
## Reset timings & histograms every interval (default=true)
## Delete gauges every interval (default=false)
delete_gauges = false
## Delete counters every interval (default=false)
delete_counters = false
## Delete sets every interval (default=false)
delete_sets = false
## Delete timings & histograms every interval (default=true)
delete_timings = true

## Percentiles to calculate for timing & histogram stats
percentiles = [90]

@@ -32,6 +32,8 @@ var dropwarn = "E! Error: statsd message queue full. " +
"We have dropped %d messages so far. " +
"You may want to increase allowed_pending_messages in the config\n"

var prevInstance *Statsd

type Statsd struct {
// Address & Port to serve from
ServiceAddress string
@@ -96,7 +98,6 @@ type metric struct {
hash string
intvalue int64
floatvalue float64
strvalue string
mtype string
additive bool
samplerate float64
@@ -105,7 +106,7 @@ type metric struct {

type cachedset struct {
name string
fields map[string]map[string]bool
fields map[string]map[int64]bool
tags map[string]string
}

@@ -134,19 +135,14 @@ func (_ *Statsd) Description() string {
const sampleConfig = `
## Address and port to host UDP listener on
service_address = ":8125"

## The following configuration options control when telegraf clears it's cache
## of previous values. If set to false, then telegraf will only clear it's
## cache when the daemon is restarted.
## Reset gauges every interval (default=true)
delete_gauges = true
## Reset counters every interval (default=true)
delete_counters = true
## Reset sets every interval (default=true)
delete_sets = true
## Reset timings & histograms every interval (default=true)
## Delete gauges every interval (default=false)
delete_gauges = false
## Delete counters every interval (default=false)
delete_counters = false
## Delete sets every interval (default=false)
delete_sets = false
## Delete timings & histograms every interval (default=true)
delete_timings = true

## Percentiles to calculate for timing & histogram stats
percentiles = [90]

@@ -242,10 +238,17 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
s.done = make(chan struct{})
s.in = make(chan []byte, s.AllowedPendingMessages)

s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
s.sets = make(map[string]cachedset)
s.timings = make(map[string]cachedtimings)
if prevInstance == nil {
s.gauges = make(map[string]cachedgauge)
s.counters = make(map[string]cachedcounter)
s.sets = make(map[string]cachedset)
s.timings = make(map[string]cachedtimings)
} else {
s.gauges = prevInstance.gauges
s.counters = prevInstance.counters
s.sets = prevInstance.sets
s.timings = prevInstance.timings
}

if s.ConvertNames {
log.Printf("I! WARNING statsd: convert_names config option is deprecated," +
@@ -262,6 +265,7 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error {
// Start the line parser
go s.parser()
log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress)
prevInstance = s
return nil
}

@@ -416,8 +420,8 @@ func (s *Statsd) parseStatsdLine(line string) error {

// Parse the value
if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") {
if m.mtype != "g" && m.mtype != "c" {
log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line)
if m.mtype != "g" {
log.Printf("E! Error: +- values are only supported for gauges: %s\n", line)
return errors.New("Error Parsing statsd line")
}
m.additive = true
@@ -431,7 +435,7 @@ func (s *Statsd) parseStatsdLine(line string) error {
return errors.New("Error Parsing statsd line")
}
m.floatvalue = v
case "c":
case "c", "s":
var v int64
v, err := strconv.ParseInt(pipesplit[0], 10, 64)
if err != nil {
@@ -447,8 +451,6 @@ func (s *Statsd) parseStatsdLine(line string) error {
v = int64(float64(v) / m.samplerate)
}
m.intvalue = v
case "s":
m.strvalue = pipesplit[0]
}

// Parse the name & tags from bucket
@@ -623,16 +625,16 @@ func (s *Statsd) aggregate(m metric) {
if !ok {
s.sets[m.hash] = cachedset{
name: m.name,
fields: make(map[string]map[string]bool),
fields: make(map[string]map[int64]bool),
tags: m.tags,
}
}
// check if the field exists
_, ok = s.sets[m.hash].fields[m.field]
if !ok {
s.sets[m.hash].fields[m.field] = make(map[string]bool)
s.sets[m.hash].fields[m.field] = make(map[int64]bool)
}
s.sets[m.hash].fields[m.field][m.strvalue] = true
s.sets[m.hash].fields[m.field][m.intvalue] = true
}
}

@@ -649,13 +651,8 @@ func (s *Statsd) Stop() {
func init() {
inputs.Add("statsd", func() telegraf.Input {
return &Statsd{
ServiceAddress: ":8125",
MetricSeparator: "_",
AllowedPendingMessages: defaultAllowPendingMessage,
DeleteCounters: true,
DeleteGauges: true,
DeleteSets: true,
DeleteTimings: true,
}
})
}
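For orientation, the wire format parseStatsdLine handles is `bucket:value|type[|@sample_rate]`; a few representative lines (the first two appear in the test fixtures above and below, the last two are illustrative examples of the gauge and timing types):

```
sample.rate:1|c|@0.1
scientific.notation.sets:4.696E+5|s
users.online:42|g
request.latency:320|ms
```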
@@ -139,9 +139,6 @@ func TestParse_Sets(t *testing.T) {
"scientific.notation.sets:4.696E+5|s",
"scientific.notation.sets:4.696E+5|s",
"scientific.notation.sets:4.697E+5|s",
"string.sets:foobar|s",
"string.sets:foobar|s",
"string.sets:bar|s",
}

for _, line := range valid_lines {
@@ -167,10 +164,6 @@ func TestParse_Sets(t *testing.T) {
"oneuser_id",
1,
},
{
"string_sets",
2,
},
}

for _, test := range validations {
@@ -197,8 +190,6 @@ func TestParse_Counters(t *testing.T) {
"sample.rate:1|c|@0.1",
"sample.rate:1|c",
"scientific.notation:4.696E+5|c",
"negative.test:100|c",
"negative.test:-5|c",
}

for _, line := range valid_lines {
@@ -232,10 +223,6 @@ func TestParse_Counters(t *testing.T) {
"sample_rate",
11,
},
{
"negative_test",
95,
},
}

for _, test := range validations {
@@ -305,9 +292,11 @@ func TestParse_InvalidLines(t *testing.T) {
"i.dont.have.a.pipe:45g",
"i.dont.have.a.colon45|c",
"invalid.metric.type:45|e",
"invalid.plus.minus.non.gauge:+10|c",
"invalid.plus.minus.non.gauge:+10|s",
"invalid.plus.minus.non.gauge:+10|ms",
"invalid.plus.minus.non.gauge:+10|h",
"invalid.plus.minus.non.gauge:-10|c",
"invalid.value:foobar|c",
"invalid.value:d11|c",
"invalid.value:1d1|c",
@@ -19,7 +19,6 @@ to the unix `uptime` command.
- load15 (float)
- load5 (float)
- n_users (integer)
- n_cpus (integer)
- uptime (integer, seconds)
- uptime_format (string)

@@ -32,7 +31,5 @@ None
```
$ telegraf -config ~/ws/telegraf.conf -input-filter system -test
* Plugin: system, Collection 1
* Plugin: inputs.system, Collection 1
> system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000
> system,host=tyrion uptime=1249632i,uptime_format="14 days, 11:07" 1483964144000000000
> system load1=2.05,load15=2.38,load5=2.03,n_users=4i,uptime=239043i,uptime_format="2 days, 18:24" 1457546165399253452
```
@@ -73,10 +73,7 @@ func (s *systemPS) DiskUsage(
var usage []*disk.UsageStat
var partitions []*disk.PartitionStat

for i := range parts {

p := parts[i]

for _, p := range parts {
if len(mountPointFilter) > 0 {
// If the mount point is not a member of the filter set,
// don't gather info on it.
@@ -36,8 +36,6 @@ The plugin expects messages in one of the
files = ["/var/mymetrics.out"]
## Read file from beginning.
from_beginning = false
## Whether file is a named pipe
pipe = false

## Data format to consume.
## Each data format has it's own unique set of configuration options, read
@@ -16,7 +16,6 @@ import (
type Tail struct {
Files []string
FromBeginning bool
Pipe bool

tailers []*tail.Tail
parser parsers.Parser
@@ -45,8 +44,6 @@ const sampleConfig = `
files = ["/var/mymetrics.out"]
## Read file from beginning.
from_beginning = false
## Whether file is a named pipe
pipe = false

## Data format to consume.
## Each data format has it's own unique set of configuration options, read
@@ -73,12 +70,10 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {

t.acc = acc

var seek *tail.SeekInfo
if !t.Pipe && !t.FromBeginning {
seek = &tail.SeekInfo{
Whence: 2,
Offset: 0,
}
var seek tail.SeekInfo
if !t.FromBeginning {
seek.Whence = 2
seek.Offset = 0
}

var errS string
@@ -93,9 +88,8 @@ func (t *Tail) Start(acc telegraf.Accumulator) error {
tail.Config{
ReOpen: true,
Follow: true,
Location: seek,
Location: &seek,
MustExist: true,
Pipe: t.Pipe,
})
if err != nil {
errS += err.Error() + " "
@@ -136,10 +130,6 @@ func (t *Tail) receiver(tailer *tail.Tail) {
tailer.Filename, line.Text, err)
}
}
if err := tailer.Err(); err != nil {
log.Printf("E! Error tailing file %s, Error: %s\n",
tailer.Filename, err)
}
}

func (t *Tail) Stop() {
@@ -1,419 +0,0 @@
|
||||
// Copyright (c) 2010 The win Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions
|
||||
// are met:
|
||||
// 1. Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// 2. Redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution.
|
||||
// 3. The names of the authors may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
|
||||
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// This is the official list of 'win' authors for copyright purposes.
|
||||
//
|
||||
// Alexander Neumann <an2048@googlemail.com>
|
||||
// Joseph Watson <jtwatson@linux-consulting.us>
|
||||
// Kevin Pors <krpors@gmail.com>
|
||||
|
||||
// +build windows
|
||||
|
||||
package win_perf_counters
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Error codes
|
||||
const (
|
||||
ERROR_SUCCESS = 0
|
||||
ERROR_INVALID_FUNCTION = 1
|
||||
)
|
||||
|
||||
type (
|
||||
HANDLE uintptr
|
||||
)
|
||||
|
||||
// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h
|
||||
const (
|
||||
PDH_CSTATUS_VALID_DATA = 0x00000000 // The returned data is valid.
|
||||
PDH_CSTATUS_NEW_DATA = 0x00000001 // The return data value is valid and different from the last sample.
|
||||
PDH_CSTATUS_NO_MACHINE = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline.
|
||||
PDH_CSTATUS_NO_INSTANCE = 0x800007D1
|
||||
PDH_MORE_DATA = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'.
|
||||
PDH_CSTATUS_ITEM_NOT_VALIDATED = 0x800007D3
|
||||
PDH_RETRY = 0x800007D4
|
||||
PDH_NO_DATA = 0x800007D5 // The query does not currently contain any counters (for example, limited access)
|
||||
PDH_CALC_NEGATIVE_DENOMINATOR = 0x800007D6
|
||||
PDH_CALC_NEGATIVE_TIMEBASE = 0x800007D7
|
||||
PDH_CALC_NEGATIVE_VALUE = 0x800007D8
|
||||
PDH_DIALOG_CANCELLED = 0x800007D9
|
||||
PDH_END_OF_LOG_FILE = 0x800007DA
|
||||
PDH_ASYNC_QUERY_TIMEOUT = 0x800007DB
|
||||
PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE = 0x800007DC
|
||||
PDH_CSTATUS_NO_OBJECT = 0xC0000BB8
|
||||
PDH_CSTATUS_NO_COUNTER = 0xC0000BB9 // The specified counter could not be found.
|
||||
PDH_CSTATUS_INVALID_DATA = 0xC0000BBA // The counter was successfully found, but the data returned is not valid.
|
||||
PDH_MEMORY_ALLOCATION_FAILURE = 0xC0000BBB
|
||||
PDH_INVALID_HANDLE = 0xC0000BBC
|
||||
PDH_INVALID_ARGUMENT = 0xC0000BBD // Required argument is missing or incorrect.
|
||||
PDH_FUNCTION_NOT_FOUND = 0xC0000BBE
|
||||
PDH_CSTATUS_NO_COUNTERNAME = 0xC0000BBF
|
||||
PDH_CSTATUS_BAD_COUNTERNAME = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path.
|
||||
PDH_INVALID_BUFFER = 0xC0000BC1
|
||||
PDH_INSUFFICIENT_BUFFER = 0xC0000BC2
|
||||
PDH_CANNOT_CONNECT_MACHINE = 0xC0000BC3
|
||||
PDH_INVALID_PATH = 0xC0000BC4
|
||||
PDH_INVALID_INSTANCE = 0xC0000BC5
|
||||
PDH_INVALID_DATA = 0xC0000BC6 // specified counter does not contain valid data or a successful status code.
|
||||
PDH_NO_DIALOG_DATA = 0xC0000BC7
|
||||
PDH_CANNOT_READ_NAME_STRINGS = 0xC0000BC8
|
||||
PDH_LOG_FILE_CREATE_ERROR = 0xC0000BC9
|
||||
PDH_LOG_FILE_OPEN_ERROR = 0xC0000BCA
|
||||
PDH_LOG_TYPE_NOT_FOUND = 0xC0000BCB
|
||||
PDH_NO_MORE_DATA = 0xC0000BCC
|
||||
PDH_ENTRY_NOT_IN_LOG_FILE = 0xC0000BCD
|
||||
PDH_DATA_SOURCE_IS_LOG_FILE = 0xC0000BCE
|
||||
PDH_DATA_SOURCE_IS_REAL_TIME = 0xC0000BCF
|
||||
PDH_UNABLE_READ_LOG_HEADER = 0xC0000BD0
|
||||
PDH_FILE_NOT_FOUND = 0xC0000BD1
|
||||
PDH_FILE_ALREADY_EXISTS = 0xC0000BD2
|
||||
PDH_NOT_IMPLEMENTED = 0xC0000BD3
|
||||
PDH_STRING_NOT_FOUND = 0xC0000BD4
|
||||
PDH_UNABLE_MAP_NAME_FILES = 0x80000BD5
|
||||
PDH_UNKNOWN_LOG_FORMAT = 0xC0000BD6
|
||||
PDH_UNKNOWN_LOGSVC_COMMAND = 0xC0000BD7
|
||||
PDH_LOGSVC_QUERY_NOT_FOUND = 0xC0000BD8
|
||||
PDH_LOGSVC_NOT_OPENED = 0xC0000BD9
|
||||
PDH_WBEM_ERROR = 0xC0000BDA
|
||||
PDH_ACCESS_DENIED = 0xC0000BDB
|
||||
PDH_LOG_FILE_TOO_SMALL = 0xC0000BDC
|
||||
PDH_INVALID_DATASOURCE = 0xC0000BDD
|
||||
PDH_INVALID_SQLDB = 0xC0000BDE
|
||||
PDH_NO_COUNTERS = 0xC0000BDF
|
||||
PDH_SQL_ALLOC_FAILED = 0xC0000BE0
|
||||
PDH_SQL_ALLOCCON_FAILED = 0xC0000BE1
|
||||
PDH_SQL_EXEC_DIRECT_FAILED = 0xC0000BE2
|
||||
PDH_SQL_FETCH_FAILED = 0xC0000BE3
|
||||
PDH_SQL_ROWCOUNT_FAILED = 0xC0000BE4
|
||||
PDH_SQL_MORE_RESULTS_FAILED = 0xC0000BE5
|
||||
PDH_SQL_CONNECT_FAILED = 0xC0000BE6
|
||||
PDH_SQL_BIND_FAILED = 0xC0000BE7
|
||||
PDH_CANNOT_CONNECT_WMI_SERVER = 0xC0000BE8
|
||||
PDH_PLA_COLLECTION_ALREADY_RUNNING = 0xC0000BE9
|
||||
PDH_PLA_ERROR_SCHEDULE_OVERLAP = 0xC0000BEA
|
||||
PDH_PLA_COLLECTION_NOT_FOUND = 0xC0000BEB
|
||||
PDH_PLA_ERROR_SCHEDULE_ELAPSED = 0xC0000BEC
|
||||
PDH_PLA_ERROR_NOSTART = 0xC0000BED
|
||||
PDH_PLA_ERROR_ALREADY_EXISTS = 0xC0000BEE
|
||||
PDH_PLA_ERROR_TYPE_MISMATCH = 0xC0000BEF
|
||||
PDH_PLA_ERROR_FILEPATH = 0xC0000BF0
|
||||
PDH_PLA_SERVICE_ERROR = 0xC0000BF1
|
||||
PDH_PLA_VALIDATION_ERROR = 0xC0000BF2
|
||||
PDH_PLA_VALIDATION_WARNING = 0x80000BF3
|
||||
PDH_PLA_ERROR_NAME_TOO_LONG = 0xC0000BF4
|
||||
PDH_INVALID_SQL_LOG_FORMAT = 0xC0000BF5
|
||||
PDH_COUNTER_ALREADY_IN_QUERY = 0xC0000BF6
|
||||
PDH_BINARY_LOG_CORRUPT = 0xC0000BF7
|
||||
PDH_LOG_SAMPLE_TOO_SMALL = 0xC0000BF8
|
||||
PDH_OS_LATER_VERSION = 0xC0000BF9
|
||||
PDH_OS_EARLIER_VERSION = 0xC0000BFA
|
||||
PDH_INCORRECT_APPEND_TIME = 0xC0000BFB
|
||||
PDH_UNMATCHED_APPEND_COUNTER = 0xC0000BFC
|
||||
PDH_SQL_ALTER_DETAIL_FAILED = 0xC0000BFD
|
||||
PDH_QUERY_PERF_DATA_TIMEOUT = 0xC0000BFE
|
||||
)
|
||||
|
||||
// Formatting options for GetFormattedCounterValue().
|
||||
const (
|
||||
PDH_FMT_RAW = 0x00000010
|
||||
PDH_FMT_ANSI = 0x00000020
|
||||
PDH_FMT_UNICODE = 0x00000040
|
||||
PDH_FMT_LONG = 0x00000100 // Return data as a long int.
|
||||
PDH_FMT_DOUBLE = 0x00000200 // Return data as a double precision floating point real.
|
||||
PDH_FMT_LARGE = 0x00000400 // Return data as a 64 bit integer.
|
||||
PDH_FMT_NOSCALE = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor.
|
||||
PDH_FMT_1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000.
|
||||
PDH_FMT_NODATA = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing.
|
||||
PDH_FMT_NOCAP100 = 0x00008000 // can be OR-ed: do not cap values > 100.
|
||||
PERF_DETAIL_COSTLY = 0x00010000
|
||||
PERF_DETAIL_STANDARD = 0x0000FFFF
|
||||
)
|
||||
|
||||
type (
|
||||
PDH_HQUERY HANDLE // query handle
|
||||
PDH_HCOUNTER HANDLE // counter handle
|
||||
)
|
||||
|
||||
// Union specialization for double values
|
||||
type PDH_FMT_COUNTERVALUE_DOUBLE struct {
|
||||
CStatus uint32
|
||||
DoubleValue float64
|
||||
}
|
||||
|
||||
// Union specialization for 64 bit integer values
|
||||
type PDH_FMT_COUNTERVALUE_LARGE struct {
|
||||
CStatus uint32
|
||||
LargeValue int64
|
||||
}
|
||||
|
||||
// Union specialization for long values
|
||||
type PDH_FMT_COUNTERVALUE_LONG struct {
|
||||
CStatus uint32
|
||||
LongValue int32
|
||||
padding [4]byte
|
||||
}
|
||||
|
||||
// Union specialization for double values, used by PdhGetFormattedCounterArrayDouble()
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_DOUBLE
|
||||
}
|
||||
|
||||
// Union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge()
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_LARGE struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_LARGE
|
||||
}
|
||||
|
||||
// Union specialization for long values, used by PdhGetFormattedCounterArrayLong()
|
||||
type PDH_FMT_COUNTERVALUE_ITEM_LONG struct {
|
||||
SzName *uint16 // pointer to a string
|
||||
FmtValue PDH_FMT_COUNTERVALUE_LONG
|
||||
}
|
||||
|
||||
var (
|
||||
// Library
|
||||
libpdhDll *syscall.DLL
|
||||
|
||||
// Functions
|
||||
pdh_AddCounterW *syscall.Proc
|
||||
pdh_AddEnglishCounterW *syscall.Proc
|
||||
pdh_CloseQuery *syscall.Proc
|
||||
pdh_CollectQueryData *syscall.Proc
|
||||
pdh_GetFormattedCounterValue *syscall.Proc
|
||||
pdh_GetFormattedCounterArrayW *syscall.Proc
|
||||
pdh_OpenQuery *syscall.Proc
|
||||
pdh_ValidatePathW *syscall.Proc
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Library
|
||||
libpdhDll = syscall.MustLoadDLL("pdh.dll")
|
||||
|
||||
// Functions
|
||||
pdh_AddCounterW = libpdhDll.MustFindProc("PdhAddCounterW")
|
||||
pdh_AddEnglishCounterW, _ = libpdhDll.FindProc("PdhAddEnglishCounterW") // XXX: only supported on versions > Vista.
|
||||
pdh_CloseQuery = libpdhDll.MustFindProc("PdhCloseQuery")
|
||||
pdh_CollectQueryData = libpdhDll.MustFindProc("PdhCollectQueryData")
|
||||
pdh_GetFormattedCounterValue = libpdhDll.MustFindProc("PdhGetFormattedCounterValue")
|
||||
pdh_GetFormattedCounterArrayW = libpdhDll.MustFindProc("PdhGetFormattedCounterArrayW")
|
||||
pdh_OpenQuery = libpdhDll.MustFindProc("PdhOpenQuery")
|
||||
pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW")
|
||||
}
|
||||
|
||||
// Adds the specified counter to the query. This is the internationalized version. Preferably, use the
|
||||
// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery.
|
||||
// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version).
|
||||
// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value
|
||||
// later, call PdhGetCounterInfo() and access dwQueryUserData of the PDH_COUNTER_INFO structure.
|
||||
//
|
||||
// Examples of szFullCounterPath (in an English version of Windows):
|
||||
//
|
||||
// \\Processor(_Total)\\% Idle Time
|
||||
// \\Processor(_Total)\\% Processor Time
|
||||
// \\LogicalDisk(C:)\% Free Space
|
||||
//
|
||||
// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility,
|
||||
// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a
|
||||
// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an
|
||||
// interface to the available counters, and can be found at the following key:
|
||||
//
|
||||
// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage
|
||||
//
|
||||
// This registry key contains several values as follows:
|
||||
//
|
||||
// 1
|
||||
// 1847
|
||||
// 2
|
||||
// System
|
||||
// 4
|
||||
// Memory
|
||||
// 6
|
||||
// % Processor Time
|
||||
// ... many, many more
|
||||
//
|
||||
// Somehow, these numeric values can be used as szFullCounterPath too:
|
||||
//
|
||||
// \2\6 will correspond to \\System\% Processor Time
|
||||
//
|
||||
// The typeperf command may also be pretty easy. To find all performance counters, simply execute:
|
||||
//
|
||||
// typeperf -qx
|
||||
func PdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
|
||||
ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
|
||||
ret, _, _ := pdh_AddCounterW.Call(
|
||||
uintptr(hQuery),
|
||||
uintptr(unsafe.Pointer(ptxt)),
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phCounter)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on
|
||||
// Windows versions higher than Vista.
|
||||
func PdhAddEnglishCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 {
|
||||
if pdh_AddEnglishCounterW == nil {
|
||||
return ERROR_INVALID_FUNCTION
|
||||
}
|
||||
|
||||
ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath)
|
||||
ret, _, _ := pdh_AddEnglishCounterW.Call(
|
||||
uintptr(hQuery),
|
||||
uintptr(unsafe.Pointer(ptxt)),
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phCounter)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Closes all counters contained in the specified query, closes all handles related to the query,
|
||||
// and frees all memory associated with the query.
|
||||
func PdhCloseQuery(hQuery PDH_HQUERY) uint32 {
|
||||
ret, _, _ := pdh_CloseQuery.Call(uintptr(hQuery))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Collects the current raw data value for all counters in the specified query and updates the status
|
||||
// code of each counter. With some counters, this function needs to be repeatedly called before the value
|
||||
// of the counter can be extracted with PdhGetFormattedCounterValue(). For example, the following code
|
||||
// requires at least two calls:
|
||||
//
|
||||
// var handle win.PDH_HQUERY
|
||||
// var counterHandle win.PDH_HCOUNTER
|
||||
// ret := win.PdhOpenQuery(0, 0, &handle)
|
||||
// ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle)
|
||||
// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE
|
||||
//
|
||||
// ret = win.PdhCollectQueryData(handle)
|
||||
// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA
|
||||
// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
|
||||
//
|
||||
// ret = win.PdhCollectQueryData(handle)
|
||||
// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS
|
||||
// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp)
|
||||
//
|
||||
// The PdhCollectQueryData will return an error in the first call because it needs two values for
|
||||
// displaying the correct data for the processor idle time. The second call will have a 0 return code.
|
||||
func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 {
|
||||
ret, _, _ := pdh_CollectQueryData.Call(uintptr(hQuery))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue.
|
||||
// This function does not directly translate to a Windows counterpart due to union specialization tricks.
|
||||
func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 {
|
||||
ret, _, _ := pdh_GetFormattedCounterValue.Call(
|
||||
uintptr(hCounter),
|
||||
uintptr(PDH_FMT_DOUBLE),
|
||||
uintptr(unsafe.Pointer(lpdwType)),
|
||||
uintptr(unsafe.Pointer(pValue)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Returns an array of formatted counter values. Use this function when you want to format the counter values of a
|
||||
// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE.
|
||||
// An example of how this function can be used:
|
||||
//
|
||||
// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character
|
||||
//
|
||||
// // ommitted all necessary stuff ...
|
||||
//
|
||||
// var bufSize uint32
|
||||
// var bufCount uint32
|
||||
// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
|
||||
// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
|
||||
//
|
||||
// for {
|
||||
// // collect
|
||||
// ret := win.PdhCollectQueryData(queryHandle)
|
||||
// if ret == win.ERROR_SUCCESS {
|
||||
// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
|
||||
// if ret == win.PDH_MORE_DATA {
|
||||
// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
|
||||
// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0])
|
||||
// for i := 0; i < int(bufCount); i++ {
|
||||
// c := filledBuf[i]
|
||||
// var s string = win.UTF16PtrToString(c.SzName)
|
||||
// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue)
|
||||
// }
|
||||
//
|
||||
// filledBuf = nil
|
||||
// // Need to at least set bufSize to zero, because if not, the function will not
|
||||
// // return PDH_MORE_DATA and will not set the bufSize.
|
||||
// bufCount = 0
|
||||
// bufSize = 0
|
||||
// }
|
||||
//
|
||||
// time.Sleep(2000 * time.Millisecond)
|
||||
// }
|
||||
// }
|
||||
func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 {
|
||||
ret, _, _ := pdh_GetFormattedCounterArrayW.Call(
|
||||
uintptr(hCounter),
|
||||
uintptr(PDH_FMT_DOUBLE),
|
||||
uintptr(unsafe.Pointer(lpdwBufferSize)),
|
||||
uintptr(unsafe.Pointer(lpdwBufferCount)),
|
||||
uintptr(unsafe.Pointer(itemBuffer)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Creates a new query that is used to manage the collection of performance data.
|
||||
// szDataSource is a null terminated string that specifies the name of the log file from which to
|
||||
// retrieve the performance data. If 0, performance data is collected from a real-time data source.
|
||||
// dwUserData is a user-defined value to associate with this query. To retrieve the user data later,
|
||||
// call PdhGetCounterInfo and access dwQueryUserData of the PDH_COUNTER_INFO structure. phQuery is
|
||||
// the handle to the query, and must be used in subsequent calls. This function returns a PDH_
|
||||
// constant error code, or ERROR_SUCCESS if the call succeeded.
|
||||
func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *PDH_HQUERY) uint32 {
|
||||
ret, _, _ := pdh_OpenQuery.Call(
|
||||
szDataSource,
|
||||
dwUserData,
|
||||
uintptr(unsafe.Pointer(phQuery)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
// Validates a path. Will return ERROR_SUCCESS when ok, or PDH_CSTATUS_BAD_COUNTERNAME when the path is
|
||||
// erroneous.
|
||||
func PdhValidatePath(path string) uint32 {
|
||||
ptxt, _ := syscall.UTF16PtrFromString(path)
|
||||
ret, _, _ := pdh_ValidatePathW.Call(uintptr(unsafe.Pointer(ptxt)))
|
||||
|
||||
return uint32(ret)
|
||||
}
|
||||
|
||||
func UTF16PtrToString(s *uint16) string {
|
||||
if s == nil {
|
||||
return ""
|
||||
}
|
||||
return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/influxdata/telegraf"
|
||||
"github.com/influxdata/telegraf/plugins/inputs"
|
||||
"github.com/lxn/win"
|
||||
)
|
||||
|
||||
var sampleConfig string = `
|
||||
@@ -102,8 +103,8 @@ type item struct {
|
||||
instance string
|
||||
measurement string
|
||||
include_total bool
|
||||
handle PDH_HQUERY
|
||||
counterHandle PDH_HCOUNTER
|
||||
handle win.PDH_HQUERY
|
||||
counterHandle win.PDH_HCOUNTER
|
||||
}
|
||||
|
||||
var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec",
|
||||
@@ -112,10 +113,14 @@ var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec",
|
||||
func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName string, counter string, instance string,
|
||||
measurement string, include_total bool) {
|
||||
|
||||
var handle PDH_HQUERY
|
||||
var counterHandle PDH_HCOUNTER
|
||||
ret := PdhOpenQuery(0, 0, &handle)
|
||||
ret = PdhAddCounter(handle, query, 0, &counterHandle)
|
||||
var handle win.PDH_HQUERY
|
||||
var counterHandle win.PDH_HCOUNTER
|
||||
ret := win.PdhOpenQuery(0, 0, &handle)
|
||||
if m.PreVistaSupport {
|
||||
ret = win.PdhAddCounter(handle, query, 0, &counterHandle)
|
||||
} else {
|
||||
ret = win.PdhAddEnglishCounter(handle, query, 0, &counterHandle)
|
||||
}
|
||||
_ = ret
|
||||
|
||||
temp := &item{query, objectName, counter, instance, measurement,
|
||||
@@ -130,14 +135,14 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s
|
||||
}
|
||||
|
||||
func (m *Win_PerfCounters) InvalidObject(exists uint32, query string, PerfObject perfobject, instance string, counter string) error {
|
||||
if exists == 3221228472 { // PDH_CSTATUS_NO_OBJECT
|
||||
if exists == 3221228472 { // win.PDH_CSTATUS_NO_OBJECT
|
||||
if PerfObject.FailOnMissing {
|
||||
err := errors.New("Performance object does not exist")
|
||||
return err
|
||||
} else {
|
||||
fmt.Printf("Performance Object '%s' does not exist in query: %s\n", PerfObject.ObjectName, query)
|
||||
}
|
||||
} else if exists == 3221228473 { // PDH_CSTATUS_NO_COUNTER
|
||||
} else if exists == 3221228473 { //win.PDH_CSTATUS_NO_COUNTER
|
||||
|
||||
if PerfObject.FailOnMissing {
|
||||
err := errors.New("Counter in Performance object does not exist")
|
||||
@@ -145,7 +150,7 @@ func (m *Win_PerfCounters) InvalidObject(exists uint32, query string, PerfObject
|
||||
} else {
|
||||
fmt.Printf("Counter '%s' does not exist in query: %s\n", counter, query)
|
||||
}
|
||||
} else if exists == 2147485649 { // PDH_CSTATUS_NO_INSTANCE
|
||||
} else if exists == 2147485649 { //win.PDH_CSTATUS_NO_INSTANCE
|
||||
if PerfObject.FailOnMissing {
|
||||
err := errors.New("Instance in Performance object does not exist")
|
||||
return err
|
||||
@@ -188,9 +193,9 @@ func (m *Win_PerfCounters) ParseConfig(metrics *itemList) error {
|
||||
query = "\\" + objectname + "(" + instance + ")\\" + counter
|
||||
}
|
||||
|
||||
var exists uint32 = PdhValidatePath(query)
|
||||
var exists uint32 = win.PdhValidatePath(query)
|
||||
|
||||
if exists == ERROR_SUCCESS {
|
||||
if exists == win.ERROR_SUCCESS {
|
||||
if m.PrintValid {
|
||||
fmt.Printf("Valid: %s\n", query)
|
||||
}
|
||||
@@ -217,7 +222,7 @@ func (m *Win_PerfCounters) Cleanup(metrics *itemList) {
|
||||
// Cleanup
|
||||
|
||||
for _, metric := range metrics.items {
|
||||
ret := PdhCloseQuery(metric.handle)
|
||||
ret := win.PdhCloseQuery(metric.handle)
|
||||
_ = ret
|
||||
}
|
||||
}
|
||||
@@ -226,7 +231,7 @@ func (m *Win_PerfCounters) CleanupTestMode() {
|
||||
// Cleanup for the testmode.
|
||||
|
||||
for _, metric := range gItemList {
|
||||
ret := PdhCloseQuery(metric.handle)
|
||||
ret := win.PdhCloseQuery(metric.handle)
|
||||
_ = ret
|
||||
}
|
||||
}
|
||||
@@ -255,26 +260,26 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
|
||||
|
||||
var bufSize uint32
|
||||
var bufCount uint32
|
||||
var size uint32 = uint32(unsafe.Sizeof(PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
|
||||
var emptyBuf [1]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
|
||||
var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{}))
|
||||
var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr.
|
||||
|
||||
// For iterate over the known metrics and get the samples.
|
||||
for _, metric := range gItemList {
|
||||
// collect
|
||||
ret := PdhCollectQueryData(metric.handle)
|
||||
if ret == ERROR_SUCCESS {
|
||||
ret = PdhGetFormattedCounterArrayDouble(metric.counterHandle, &bufSize,
|
||||
ret := win.PdhCollectQueryData(metric.handle)
|
||||
if ret == win.ERROR_SUCCESS {
|
||||
ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle, &bufSize,
|
||||
&bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN.
|
||||
if ret == PDH_MORE_DATA {
|
||||
filledBuf := make([]PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
|
||||
if ret == win.PDH_MORE_DATA {
|
||||
filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size)
|
||||
if len(filledBuf) == 0 {
|
||||
continue
|
||||
}
|
||||
ret = PdhGetFormattedCounterArrayDouble(metric.counterHandle,
|
||||
ret = win.PdhGetFormattedCounterArrayDouble(metric.counterHandle,
|
||||
&bufSize, &bufCount, &filledBuf[0])
|
||||
for i := 0; i < int(bufCount); i++ {
|
||||
c := filledBuf[i]
|
||||
var s string = UTF16PtrToString(c.SzName)
|
||||
var s string = win.UTF16PtrToString(c.SzName)
|
||||
|
||||
var add bool
|
||||
|
||||
|
||||
@@ -9,6 +9,6 @@ It requires a `servers` name.
|
||||
```toml
|
||||
# Send telegraf metrics to graylog(s)
|
||||
[[outputs.graylog]]
|
||||
## UDP endpoint for your graylog instance(s).
|
||||
## Udp endpoint for your graylog instance.
|
||||
servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
|
||||
```
|
||||
|
||||
@@ -154,7 +154,7 @@ type Graylog struct {
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
## UDP endpoint for your graylog instance.
|
||||
## Udp endpoint for your graylog instance.
|
||||
servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
|
||||
`
|
||||
|
||||
@@ -213,7 +213,7 @@ func serialize(metric telegraf.Metric) ([]string, error) {
|
||||
m := make(map[string]interface{})
|
||||
m["version"] = "1.1"
|
||||
m["timestamp"] = metric.UnixNano() / 1000000000
|
||||
m["short_message"] = "telegraf"
|
||||
m["short_message"] = " "
|
||||
m["name"] = metric.Name()
|
||||
|
||||
if host, ok := metric.Tags()["host"]; ok {
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package kinesis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
@@ -11,7 +13,6 @@ import (
|
||||
"github.com/influxdata/telegraf"
|
||||
internalaws "github.com/influxdata/telegraf/internal/config/aws"
|
||||
"github.com/influxdata/telegraf/plugins/outputs"
|
||||
"github.com/influxdata/telegraf/plugins/serializers"
|
||||
)
|
||||
|
||||
type KinesisOutput struct {
|
||||
@@ -25,10 +26,9 @@ type KinesisOutput struct {
|
||||
|
||||
StreamName string `toml:"streamname"`
|
||||
PartitionKey string `toml:"partitionkey"`
|
||||
Format string `toml:"format"`
|
||||
Debug bool `toml:"debug"`
|
||||
svc *kinesis.Kinesis
|
||||
|
||||
serializer serializers.Serializer
|
||||
}
|
||||
|
||||
var sampleConfig = `
|
||||
@@ -54,13 +54,9 @@ var sampleConfig = `
|
||||
streamname = "StreamName"
|
||||
## PartitionKey as used for sharding data.
|
||||
partitionkey = "PartitionKey"
|
||||
|
||||
## Data format to output.
|
||||
## Each data format has it's own unique set of configuration options, read
|
||||
## more about them here:
|
||||
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
|
||||
data_format = "influx"
|
||||
|
||||
## format of the Data payload in the kinesis PutRecord, supported
|
||||
## String and Custom.
|
||||
format = "string"
|
||||
## debug will show upstream aws messages.
|
||||
debug = false
|
||||
`
|
||||
@@ -129,8 +125,16 @@ func (k *KinesisOutput) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) {
|
||||
k.serializer = serializer
|
||||
func FormatMetric(k *KinesisOutput, point telegraf.Metric) (string, error) {
|
||||
if k.Format == "string" {
|
||||
return point.String(), nil
|
||||
} else {
|
||||
m := fmt.Sprintf("%+v,%+v,%+v",
|
||||
point.Name(),
|
||||
point.Tags(),
|
||||
point.String())
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
|
||||
func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration {
|
||||
@@ -157,7 +161,7 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du
|
||||
}
|
||||
|
||||
func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
|
||||
var sz uint32
|
||||
var sz uint32 = 0
|
||||
|
||||
if len(metrics) == 0 {
|
||||
return nil
|
||||
@@ -165,29 +169,23 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
|
||||
|
||||
r := []*kinesis.PutRecordsRequestEntry{}
|
||||
|
||||
for _, metric := range metrics {
|
||||
sz++
|
||||
|
||||
values, err := k.serializer.Serialize(metric)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range metrics {
|
||||
atomic.AddUint32(&sz, 1)
|
||||
|
||||
metric, _ := FormatMetric(k, p)
|
||||
d := kinesis.PutRecordsRequestEntry{
|
||||
Data: values,
|
||||
Data: []byte(metric),
|
||||
PartitionKey: aws.String(k.PartitionKey),
|
||||
}
|
||||
|
||||
r = append(r, &d)
|
||||
|
||||
if sz == 500 {
|
||||
// Max Messages Per PutRecordRequest is 500
|
||||
elapsed := writekinesis(k, r)
|
||||
log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed)
|
||||
sz = 0
|
||||
atomic.StoreUint32(&sz, 0)
|
||||
r = nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
writekinesis(k, r)
|
||||
|
||||
39
plugins/outputs/kinesis/kinesis_test.go
Normal file
@@ -0,0 +1,39 @@
package kinesis

import (
    "github.com/influxdata/telegraf/testutil"
    "github.com/stretchr/testify/require"
    "testing"
)

func TestFormatMetric(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    k := &KinesisOutput{
        Format: "string",
    }

    p := testutil.MockMetrics()[0]

    valid_string := "test1,tag1=value1 value=1 1257894000000000000\n"
    func_string, err := FormatMetric(k, p)

    if func_string != valid_string {
        t.Error("Expected ", valid_string)
    }
    require.NoError(t, err)

    k = &KinesisOutput{
        Format: "custom",
    }

    valid_custom := "test1,map[tag1:value1],test1,tag1=value1 value=1 1257894000000000000\n"
    func_custom, err := FormatMetric(k, p)

    if func_custom != valid_custom {
        t.Error("Expected ", valid_custom)
    }
    require.NoError(t, err)
}
@@ -17,8 +17,8 @@ import (

// Librato structure for configuration and client
type Librato struct {
    APIUser   string `toml:"api_user"`
    APIToken  string `toml:"api_token"`
    APIUser   string
    APIToken  string
    Debug     bool
    SourceTag string // Deprecated, keeping for backward-compatibility
    Timeout   internal.Duration

@@ -25,9 +25,6 @@ var sampleConfig = `
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## client ID, if not set a random ID is generated
  # client_id = ""

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
@@ -49,8 +46,7 @@ type MQTT struct {
    Database    string
    Timeout     internal.Duration
    TopicPrefix string
    QoS         int    `toml:"qos"`
    ClientID    string `toml:"client_id"`
    QoS         int    `toml:"qos"`

    // Path to CA file
    SSLCA string `toml:"ssl_ca"`
@@ -159,11 +155,7 @@ func (m *MQTT) publish(topic string, body []byte) error {
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
    opts := paho.NewClientOptions()

    if m.ClientID != "" {
        opts.SetClientID(m.ClientID)
    } else {
        opts.SetClientID("Telegraf-Output-" + internal.RandomString(5))
    }
    opts.SetClientID("Telegraf-Output-" + internal.RandomString(5))

    tlsCfg, err := internal.GetTLSConfig(
        m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify)

@@ -90,7 +90,7 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {

    if u.Scheme == "" || u.Scheme == "tcp" {
        return o.WriteTelnet(metrics, u)
    } else if u.Scheme == "http" || u.Scheme == "https" {
    } else if u.Scheme == "http" {
        return o.WriteHttp(metrics, u)
    } else {
        return fmt.Errorf("Unknown scheme in host parameter.")
@@ -101,8 +101,6 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
    http := openTSDBHttp{
        Host:      u.Host,
        Port:      o.Port,
        Scheme:    u.Scheme,
        User:      u.User,
        BatchSize: o.HttpBatchSize,
        Debug:     o.Debug,
    }
@@ -157,15 +155,6 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
        tags := ToLineFormat(cleanTags(m.Tags()))

        for fieldName, value := range m.Fields() {
            switch value.(type) {
            case int64:
            case uint64:
            case float64:
            default:
                log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
                continue
            }

            metricValue, buildError := buildValue(value)
            if buildError != nil {
                log.Printf("E! OpenTSDB: %s\n", buildError.Error())

@@ -23,8 +23,6 @@ type HttpMetric struct {
type openTSDBHttp struct {
    Host      string
    Port      int
    Scheme    string
    User      *url.Userinfo
    BatchSize int
    Debug     bool

@@ -120,8 +118,7 @@ func (o *openTSDBHttp) flush() error {
    o.body.close()

    u := url.URL{
        Scheme: o.Scheme,
        User:   o.User,
        Scheme: "http",
        Host:   fmt.Sprintf("%s:%d", o.Host, o.Port),
        Path:   "/api/put",
    }

@@ -103,22 +103,10 @@ type JSONFlattener struct {
    Fields map[string]interface{}
}

// FlattenJSON flattens nested maps/interfaces into a fields map (ignoring bools and string)
// FlattenJSON flattens nested maps/interfaces into a fields map
func (f *JSONFlattener) FlattenJSON(
    fieldname string,
    v interface{}) error {
    if f.Fields == nil {
        f.Fields = make(map[string]interface{})
    }
    return f.FullFlattenJSON(fieldname, v, false, false)
}

// FullFlattenJSON flattens nested maps/interfaces into a fields map (including bools and string)
func (f *JSONFlattener) FullFlattenJSON(
    fieldname string,
    v interface{},
    convertString bool,
    convertBool bool,
) error {
    if f.Fields == nil {
        f.Fields = make(map[string]interface{})
@@ -127,7 +115,7 @@ func (f *JSONFlattener) FullFlattenJSON(
    switch t := v.(type) {
    case map[string]interface{}:
        for k, v := range t {
            err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
            err := f.FlattenJSON(fieldname+"_"+k+"_", v)
            if err != nil {
                return err
            }
@@ -135,28 +123,15 @@ func (f *JSONFlattener) FullFlattenJSON(
    case []interface{}:
        for i, v := range t {
            k := strconv.Itoa(i)
            err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool)
            err := f.FlattenJSON(fieldname+"_"+k+"_", v)
            if err != nil {
                return nil
            }
        }
    case float64:
        f.Fields[fieldname] = t
    case string:
        if convertString {
            f.Fields[fieldname] = v.(string)
        } else {
            return nil
        }
    case bool:
        if convertBool {
            f.Fields[fieldname] = v.(bool)
        } else {
            return nil
        }
    case nil:
    case bool, string, nil:
        // ignored types
        fmt.Println("json parser ignoring " + fieldname)
        return nil
    default:
        return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)",