Compare commits
68 Commits
master...release-1.
Author | SHA1 | Date |
---|---|---|
Daniel Nelson | 1e51969813 | |
Daniel Nelson | 51b097a7c6 | |
Daniel Nelson | 77dfb8c9c5 | |
Jonas Hahnfeld | 1398f8e678 | |
Daniel Nelson | d96483bffb | |
Daniel Nelson | 5e534676a0 | |
Dennis Schön | 9329200afa | |
Daniel Nelson | 645b8b905d | |
dilshatm | ea7d884c09 | |
Daniel Nelson | 7f94cb58e4 | |
Daniel Nelson | d8f2d4af0f | |
Daniel Nelson | d8dae1b1ab | |
Daniel Nelson | 770cf4e0b6 | |
efficks | 8cb5391f4e | |
Daniel Nelson | c5ddb65ad9 | |
Philipp Weber | d671299e96 | |
Daniel Nelson | f59231941f | |
Daniel Nelson | 100bdfba6c | |
Daniel Nelson | 67440c95bb | |
Daniel Nelson | 39de63d03c | |
Daniel Nelson | 56edd339e7 | |
Daniel Nelson | df768f83af | |
Pierre Tessier | 8733d3826a | |
Daniel Nelson | 2bb97154db | |
Daniel Nelson | a8d9e458ab | |
Daniel Nelson | b464adb08c | |
Daniel Nelson | 4bd67824ae | |
Daniel Nelson | f5894a6a2f | |
Daniel Nelson | 1790b26651 | |
Piotr Popieluch | bb3ee1fd39 | |
Daniel Nelson | 82df5bf2d8 | |
Piotr Popieluch | 8b566b2b9f | |
Daniel Nelson | 059a751a71 | |
Michael Boudreau | dcaa0ca8db | |
Daniel Nelson | 8777e32d9f | |
Noah Crowley | 667940afac | |
Daniel Nelson | 0605af7c99 | |
Daniel Nelson | 4e89c17c0f | |
Daniel Nelson | 45b7db7de1 | |
Daniel Nelson | cc478f035e | |
Daniel Nelson | fe6239cf9f | |
Daniel Nelson | 865917f523 | |
kerams | 4aa8d72644 | |
Daniel Nelson | 384ef6af6b | |
Daniel Nelson | 07985e6524 | |
Daniel Nelson | f8597f78f4 | |
Daniel Nelson | 83faea7a31 | |
Daniel Nelson | 223bbf0df7 | |
Daniel Nelson | 55f35f291d | |
Daniel Nelson | 6852231c1b | |
Daniel Nelson | ce4ca43a5d | |
timhallinflux | 5d6622eb44 | |
Daniel Nelson | a1668bbf9a | |
Daniel Nelson | fe91c779e9 | |
Daniel Nelson | 425b6f7d63 | |
Ildar Svetlov | c322ddb4b0 | |
Daniel Nelson | 648d3bde33 | |
Daniel Nelson | d8da77cb42 | |
Ted Zlatanov | fdb04702eb | |
Steve Banik | ecf43f4986 | |
Daniel Nelson | e307e92e86 | |
Daniel Nelson | 8d4a09c3ea | |
Daniel Nelson | fd964bd4eb | |
Daniel Nelson | 994e75f1f0 | |
Daniel Nelson | 2e2efafbfc | |
Daniel Nelson | 39537ed86e | |
Daniel Nelson | 558ce25c94 | |
Daniel Nelson | 0438f412a9 |
CHANGELOG.md (46 changed lines)
@@ -1,4 +1,42 @@
-## v1.5 [unreleased]
+## v1.5.3 [2018-03-14]
+
+### Bugfixes
+
+- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
+- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
+- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
+- [#3807](https://github.com/influxdata/telegraf/issues/3807): Fix memory leak in postgresql_extensible.
+- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
+- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
+- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
+- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.
+
+## v1.5.2 [2018-01-30]
+
+### Bugfixes
+
+- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
+- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
+- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
+- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
+- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
+- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
+- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
+- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
+- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.
+
+## v1.5.1 [2018-01-10]
+
+### Bugfixes
+
+- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
+- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
+- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
+- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
+- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
+- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.
+
+## v1.5 [2017-12-14]
 
 ### New Plugins
 - [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
@@ -9,7 +47,7 @@
 - [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
 - [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
 - [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
-- [pf](./plugins/inputs/pf/README.md) Thanks to @nferch
+- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
 - [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
 - [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
 - [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
@@ -78,6 +116,7 @@
 - [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
 - [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
 - [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
+- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
 
 ### Bugfixes
 
@@ -92,6 +131,9 @@
 - [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
 - [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
 - [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
+- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
+- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
+- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
 
 ## v1.4.5 [2017-12-01]
 
Godeps (4 changed lines)
@@ -4,7 +4,7 @@ github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
 github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
 github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
-github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
+github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
 github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
 github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
 github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
@@ -65,7 +65,7 @@ github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
 github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
 github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
 github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
-github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
+github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
Makefile (8 changed lines)
@@ -88,7 +88,7 @@ docker-run:
 		-d cobaugh/openldap-alpine
 	docker run --name cratedb \
 		-p "6543:5432" \
-		-d crate crate \
+		-d crate:2.2 \
 		-Cnetwork.host=0.0.0.0 \
 		-Ctransport.host=localhost \
 		-Clicense.enterprise=false
@@ -116,12 +116,6 @@ docker-run-circle:
 		-e SLAPD_CONFIG_ROOTPW="secret" \
 		-p "389:389" -p "636:636" \
 		-d cobaugh/openldap-alpine
-	docker run --name cratedb \
-		-p "6543:5432" \
-		-d crate crate \
-		-Cnetwork.host=0.0.0.0 \
-		-Ctransport.host=localhost \
-		-Clicense.enterprise=false
 
 docker-kill:
 	-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
@@ -308,7 +308,13 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
 				metrics = processor.Apply(metrics...)
 			}
 			for _, m := range metrics {
-				outMetricC <- m
+				for i, o := range a.Config.Outputs {
+					if i == len(a.Config.Outputs)-1 {
+						o.AddMetric(m)
+					} else {
+						o.AddMetric(m.Copy())
+					}
+				}
 			}
 		}
 	}
@@ -364,8 +370,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 	metricC := make(chan telegraf.Metric, 100)
 	aggC := make(chan telegraf.Metric, 100)
 
-	now := time.Now()
-
 	// Start all ServicePlugins
 	for _, input := range a.Config.Inputs {
 		input.SetDefaultTags(a.Config.Tags)
@@ -406,7 +410,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
 			acc := NewAccumulator(agg, aggC)
 			acc.SetPrecision(a.Config.Agent.Precision.Duration,
 				a.Config.Agent.Interval.Duration)
-			agg.Run(acc, now, shutdown)
+			agg.Run(acc, shutdown)
 		}(aggregator)
 	}
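The flusher change above fans each metric out to every configured output, deep-copying for all but the last so that no two outputs ever share a mutable metric. A minimal standalone sketch of the same ownership pattern; the `Metric` and `Output` types here are simplified stand-ins, not Telegraf's real interfaces:

```go
package main

import "fmt"

// Metric is a simplified stand-in for telegraf.Metric.
type Metric struct{ fields map[string]int }

// Copy returns a deep copy so one output cannot mutate another's view.
func (m *Metric) Copy() *Metric {
	f := make(map[string]int, len(m.fields))
	for k, v := range m.fields {
		f[k] = v
	}
	return &Metric{fields: f}
}

// Output is a simplified stand-in for a Telegraf output buffer.
type Output struct{ received []*Metric }

func (o *Output) AddMetric(m *Metric) { o.received = append(o.received, m) }

func main() {
	outputs := []*Output{{}, {}, {}}
	m := &Metric{fields: map[string]int{"value": 1}}
	for i, o := range outputs {
		if i == len(outputs)-1 {
			o.AddMetric(m) // the last output may take ownership of the original
		} else {
			o.AddMetric(m.Copy()) // every other output gets an independent copy
		}
	}
	fmt.Println(len(outputs[0].received), len(outputs[2].received)) // 1 1
}
```

Handing the original to the last output saves one copy per metric without sharing state.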
@@ -1,3 +1,4 @@
+image: Previous Visual Studio 2015
 version: "{build}"
 
 cache:
@@ -12,11 +13,11 @@ platform: x64
 
 install:
   - IF NOT EXIST "C:\Cache" mkdir C:\Cache
-  - IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
+  - IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
  - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
   - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
   - IF EXIST "C:\Go" rmdir /S /Q C:\Go
-  - msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
+  - msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
   - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
   - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
   - go version
@@ -6,8 +6,8 @@ machine:
     - rabbitmq-server
   post:
     - sudo rm -rf /usr/local/go
-    - wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
-    - sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
+    - wget https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz
+    - sudo tar -C /usr/local -xzf go1.9.4.linux-amd64.tar.gz
     - go version
 
 dependencies:
@@ -1661,7 +1661,7 @@
 # #   insecure_skip_verify = false
 #
 #   ## Add metrics to read
-#   [[inputs.jolokia2.metric]]
+#   [[inputs.jolokia2_agent.metric]]
 #     name  = "java_runtime"
 #     mbean = "java.lang:type=Runtime"
 #     paths = ["Uptime"]
@@ -40,6 +40,11 @@ var (
	// envVarRe is a regex to find environment variables in the config file
 	envVarRe = regexp.MustCompile(`\$\w+`)
+
+	envVarEscaper = strings.NewReplacer(
+		`"`, `\"`,
+		`\`, `\\`,
+	)
 )
 
 // Config specifies the URL/user/password for the database that telegraf
@@ -689,6 +694,11 @@ func trimBOM(f []byte) []byte {
 	return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
 }
 
+// escapeEnv escapes a value for inserting into a TOML string.
+func escapeEnv(value string) string {
+	return envVarEscaper.Replace(value)
+}
+
 // parseFile loads a TOML configuration from a provided path and
 // returns the AST produced from the TOML parser. When loading the file, it
 // will find environment variables and replace them.
@@ -702,8 +712,9 @@ func parseFile(fpath string) (*ast.Table, error) {
 
 	env_vars := envVarRe.FindAll(contents, -1)
 	for _, env_var := range env_vars {
-		env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
-		if env_val != "" {
+		env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
+		if ok {
+			env_val = escapeEnv(env_val)
 			contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
 		}
 	}
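This is the fix for changelog entry #3607: `os.LookupEnv` substitutes variables that are set but empty, and the escaper keeps a quote or backslash inside the value from breaking the surrounding TOML string. A self-contained sketch of the substitution path, mirroring the regex and replacer from the diff; the sample config line is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"regexp"
	"strings"
)

var (
	// Find $VAR references; escape TOML-breaking characters in their values.
	envVarRe      = regexp.MustCompile(`\$\w+`)
	envVarEscaper = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
)

func main() {
	os.Setenv("PASSWORD", `se"cret\`)

	contents := []byte(`password = "$PASSWORD"`)
	for _, envVar := range envVarRe.FindAll(contents, -1) {
		if val, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$")); ok {
			val = envVarEscaper.Replace(val) // escape before splicing into TOML
			contents = bytes.Replace(contents, envVar, []byte(val), 1)
		}
	}
	fmt.Println(string(contents)) // password = "se\"cret\\"
}
```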
@@ -114,7 +114,6 @@ func (r *RunningAggregator) reset() {
 // for period ticks to tell it when to push and reset the aggregator.
 func (r *RunningAggregator) Run(
 	acc telegraf.Accumulator,
-	now time.Time,
 	shutdown chan struct{},
 ) {
 	// The start of the period is truncated to the nearest second.
@@ -133,6 +132,7 @@ func (r *RunningAggregator) Run(
 	// 2nd interval: 00:10 - 00:20.5
 	// etc.
 	//
+	now := time.Now()
 	r.periodStart = now.Truncate(time.Second)
 	truncation := now.Sub(r.periodStart)
 	r.periodEnd = r.periodStart.Add(r.Config.Period)
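Capturing `now` inside `Run` instead of taking it as a parameter means the aggregation period starts when the aggregator goroutine actually begins, which is what aligns the period with the internal ticker (changelog entry #3693). The period bookkeeping from the diff, runnable in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := 10 * time.Second

	// Capture "now" at Run time, truncated to the nearest second,
	// so the period boundaries match the ticker that drives pushes.
	now := time.Now()
	periodStart := now.Truncate(time.Second)
	truncation := now.Sub(periodStart)
	periodEnd := periodStart.Add(period)

	fmt.Println("start:", periodStart, "end:", periodEnd, "truncated by:", truncation)
}
```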
@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
 	})
 	assert.NoError(t, ra.Config.Filter.Compile())
 	acc := testutil.Accumulator{}
-	go ra.Run(&acc, time.Now(), make(chan struct{}))
+	go ra.Run(&acc, make(chan struct{}))
 
 	m := ra.MakeMetric(
 		"RITest",
@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
 	})
 	assert.NoError(t, ra.Config.Filter.Compile())
 	acc := testutil.Accumulator{}
-	go ra.Run(&acc, time.Now(), make(chan struct{}))
+	go ra.Run(&acc, make(chan struct{}))
 
 	// metric before current period
 	m := ra.MakeMetric(
@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		ra.Run(&acc, time.Now(), shutdown)
+		ra.Run(&acc, shutdown)
 	}()
 
 	m := ra.MakeMetric(
@@ -1,6 +1,6 @@
 # Bond Input Plugin
 
-The Bond Input plugin collects bond interface status, bond's slaves interfaces
+The Bond Input plugin collects network bond interface status, bond's slaves interfaces
 status and failures count of bond's slaves interfaces.
 The plugin collects these metrics from `/proc/net/bonding/*` files.
 
@@ -325,7 +325,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
 		UID: sa.AccountID,
 		StandardClaims: jwt.StandardClaims{
 			// How long we have to login with this token
-			ExpiresAt: int64(5 * time.Minute / time.Second),
+			ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
 		},
 	})
 	return token.SignedString(sa.PrivateKey)
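This is the fix for changelog entry #3625. The old expression divided a `time.Duration` by `time.Second`, producing the constant 300, and as a JWT `exp` claim that means five minutes after the Unix epoch, so the login token was always already expired. The claim must be an absolute Unix timestamp:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Broken: a duration expressed in seconds (always 300), not a point in time.
	broken := int64(5 * time.Minute / time.Second)

	// Fixed: an absolute Unix timestamp five minutes from now.
	fixed := time.Now().Add(5 * time.Minute).Unix()

	fmt.Println(broken, fixed)
}
```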
@@ -41,6 +41,8 @@ const sampleConfig = `
   data_format = "influx"
 `
 
+const MaxStderrBytes = 512
+
 type Exec struct {
 	Commands []string
 	Command  string
@@ -96,15 +98,41 @@ func (c CommandRunner) Run(
 
 	cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
 
-	var out bytes.Buffer
+	var (
+		out    bytes.Buffer
+		stderr bytes.Buffer
+	)
 	cmd.Stdout = &out
+	cmd.Stderr = &stderr
 
 	if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
 		switch e.parser.(type) {
 		case *nagios.NagiosParser:
 			AddNagiosState(err, acc)
 		default:
-			return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
+			var errMessage = ""
+			if stderr.Len() > 0 {
+				stderr = removeCarriageReturns(stderr)
+				// Limit the number of bytes.
+				didTruncate := false
+				if stderr.Len() > MaxStderrBytes {
+					stderr.Truncate(MaxStderrBytes)
+					didTruncate = true
+				}
+				if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
+					// Only show truncation if the newline wasn't the last character.
+					if i < stderr.Len()-1 {
+						didTruncate = true
+					}
+					stderr.Truncate(i)
+				}
+				if didTruncate {
+					stderr.WriteString("...")
+				}
+
+				errMessage = fmt.Sprintf(": %s", stderr.String())
+			}
+			return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage)
 		}
 	} else {
 		switch e.parser.(type) {
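This implements changelog entry #3851: on failure, the command's stderr is folded into the error message, trimmed to its first line and capped at `MaxStderrBytes`, with `...` marking truncation. A self-contained sketch of the trimming logic; the diff's `removeCarriageReturns` helper is elided here, so this assumes LF-only output:

```go
package main

import (
	"bytes"
	"fmt"
)

const MaxStderrBytes = 512

// firstLine caps stderr at MaxStderrBytes, keeps only its first line,
// and appends "..." whenever anything was cut.
func firstLine(stderr bytes.Buffer) string {
	didTruncate := false
	if stderr.Len() > MaxStderrBytes {
		stderr.Truncate(MaxStderrBytes)
		didTruncate = true
	}
	if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
		// Only mark truncation if the newline wasn't the last character.
		if i < stderr.Len()-1 {
			didTruncate = true
		}
		stderr.Truncate(i)
	}
	if didTruncate {
		stderr.WriteString("...")
	}
	return stderr.String()
}

func main() {
	var b bytes.Buffer
	b.WriteString("fatal: config not found\nmore detail\n")
	fmt.Println(firstLine(b)) // fatal: config not found...
}
```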
@@ -46,7 +46,7 @@ func (ja *JolokiaAgent) SampleConfig() string {
   # insecure_skip_verify = false
 
   ## Add metrics to read
-  [[inputs.jolokia2.metric]]
+  [[inputs.jolokia2_agent.metric]]
     name  = "java_runtime"
     mbean = "java.lang:type=Runtime"
     paths = ["Uptime"]
@@ -4,6 +4,8 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend.
 
 ### Configuration:
 
+To use this plugin you must enable the [monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend.
+
 ```toml
 [[inputs.openldap]]
   host = "localhost"
@@ -102,7 +102,7 @@ func (p *process) getUptime() int64 {
 				uptime += value * (24 * 60 * 60)
 			}
 		case strings.HasSuffix(v, "h"):
-			iValue := strings.TrimSuffix(v, "y")
+			iValue := strings.TrimSuffix(v, "h")
 			value, err := strconv.ParseInt(iValue, 10, 64)
 			if err == nil {
 				uptime += value * (60 * 60)
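This is the fix for changelog entry #3871: the hours branch trimmed the suffix "y" instead of "h", so values like "5h" failed to parse and hours were silently dropped. That is why the expected test uptime below changes from 226 to 191026 seconds. A sketch of the corrected parser for the day/hour/minute/second suffixes the test exercises (the plugin's real switch also handles years and weeks):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUptime handles strings like "2d 5h 3m 46s".
func parseUptime(s string) int64 {
	var uptime int64
	for _, v := range strings.Fields(s) {
		var suffix string
		var mult int64
		switch {
		case strings.HasSuffix(v, "d"):
			suffix, mult = "d", 24*60*60
		case strings.HasSuffix(v, "h"):
			suffix, mult = "h", 60*60 // the bug trimmed "y" here, so parsing failed
		case strings.HasSuffix(v, "m"):
			suffix, mult = "m", 60
		case strings.HasSuffix(v, "s"):
			suffix, mult = "s", 1
		default:
			continue
		}
		if n, err := strconv.ParseInt(strings.TrimSuffix(v, suffix), 10, 64); err == nil {
			uptime += n * mult
		}
	}
	return uptime
}

func main() {
	fmt.Println(parseUptime("2d 5h 3m 46s")) // 191026
}
```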
@@ -126,7 +126,7 @@ func TestPassengerGenerateMetric(t *testing.T) {
 		"spawn_start_time": int64(1452746844946982),
 		"spawn_end_time":   int64(1452746845013365),
 		"last_used":        int64(1452747071764940),
-		"uptime":           int64(226), // in seconds of 3m 46s
+		"uptime":           int64(191026), // in seconds of 2d 5h 3m 46s
 		"cpu":              int64(58),
 		"rss":              int64(418548),
 		"pss":              int64(319391),
@@ -219,7 +219,7 @@ var sampleStat = `
       <spawn_end_time>1452746845013365</spawn_end_time>
       <last_used>1452747071764940</last_used>
       <last_used_desc>0s ago</last_used_desc>
-      <uptime>3m 46s</uptime>
+      <uptime>2d 5h 3m 46s</uptime>
       <code_revision>899ac7f</code_revision>
       <life_status>ALIVE</life_status>
       <enabled>ENABLED</enabled>
@@ -263,7 +263,7 @@ var sampleStat = `
       <spawn_end_time>1452746845172460</spawn_end_time>
       <last_used>1452747071709179</last_used>
       <last_used_desc>0s ago</last_used_desc>
-      <uptime>3m 46s</uptime>
+      <uptime>2d 5h 3m 46s</uptime>
       <code_revision>899ac7f</code_revision>
       <life_status>ALIVE</life_status>
       <enabled>ENABLED</enabled>
@@ -128,16 +128,16 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 			fields["packets_transmitted"] = trans
 			fields["packets_received"] = rec
 			fields["percent_packet_loss"] = loss
-			if min > 0 {
+			if min >= 0 {
 				fields["minimum_response_ms"] = min
 			}
-			if avg > 0 {
+			if avg >= 0 {
 				fields["average_response_ms"] = avg
 			}
-			if max > 0 {
+			if max >= 0 {
 				fields["maximum_response_ms"] = max
 			}
-			if stddev > 0 {
+			if stddev >= 0 {
 				fields["standard_deviation_ms"] = stddev
 			}
 			acc.AddFields("ping", fields, tags)
@@ -198,7 +198,7 @@ func (p *Ping) args(url string) []string {
 // It returns (<transmitted packets>, <received packets>, <average response>)
 func processPingOutput(out string) (int, int, float64, float64, float64, float64, error) {
 	var trans, recv int
-	var min, avg, max, stddev float64
+	var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
 	// Set this error to nil if we find a 'transmitted' line
 	err := errors.New("Fatal error processing ping output")
 	lines := strings.Split(out, "\n")
@@ -93,32 +93,32 @@ func processPingOutput(out string) (int, int, int, int, int, int, error) {
 
 	// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
 	if len(stats) != 4 {
-		return 0, 0, 0, 0, 0, 0, err
+		return 0, 0, 0, -1, -1, -1, err
 	}
 	trans, err := strconv.Atoi(stats[1])
 	if err != nil {
-		return 0, 0, 0, 0, 0, 0, err
+		return 0, 0, 0, -1, -1, -1, err
 	}
 	receivedPacket, err := strconv.Atoi(stats[2])
 	if err != nil {
-		return 0, 0, 0, 0, 0, 0, err
+		return 0, 0, 0, -1, -1, -1, err
 	}
 
 	// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
 	if len(aproxs) != 4 {
-		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+		return trans, receivedReply, receivedPacket, -1, -1, -1, err
 	}
 	min, err := strconv.Atoi(aproxs[1])
 	if err != nil {
-		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+		return trans, receivedReply, receivedPacket, -1, -1, -1, err
 	}
 	max, err := strconv.Atoi(aproxs[2])
 	if err != nil {
-		return trans, receivedReply, receivedPacket, 0, 0, 0, err
+		return trans, receivedReply, receivedPacket, -1, -1, -1, err
 	}
 	avg, err := strconv.Atoi(aproxs[3])
 	if err != nil {
-		return 0, 0, 0, 0, 0, 0, err
+		return 0, 0, 0, -1, -1, -1, err
 	}
 
 	return trans, receivedReply, receivedPacket, avg, min, max, err
@@ -201,13 +201,13 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 			fields["packets_received"] = receivePacket
 			fields["percent_packet_loss"] = lossPackets
 			fields["percent_reply_loss"] = lossReply
-			if avg > 0 {
+			if avg >= 0 {
 				fields["average_response_ms"] = float64(avg)
 			}
-			if min > 0 {
+			if min >= 0 {
 				fields["minimum_response_ms"] = float64(min)
 			}
-			if max > 0 {
+			if max >= 0 {
 				fields["maximum_response_ms"] = float64(max)
 			}
 			acc.AddFields("ping", fields, tags)
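Across both ping implementations the timing stats now start at -1 rather than the zero value, so the gatherer can tell "statistic absent from ping output" apart from a genuine zero-millisecond round trip, and the guards become `>= 0` (changelog entry #3778). The pattern in isolation:

```go
package main

import "fmt"

func main() {
	// -1 is the sentinel for "not present in ping output"; 0 is a valid duration.
	min, avg, max := -1.0, 0.0, 2.5

	fields := map[string]interface{}{}
	if min >= 0 {
		fields["minimum_response_ms"] = min
	}
	if avg >= 0 {
		fields["average_response_ms"] = avg
	}
	if max >= 0 {
		fields["maximum_response_ms"] = max
	}
	fmt.Println(fields) // min omitted, zero avg kept
}
```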
@@ -13,6 +13,25 @@ For each of the active, hold, incoming, maildrop, and deferred queues (http://ww
   # queue_directory = "/var/spool/postfix"
 ```
 
+#### Permissions:
+
+Telegraf will need read access to the files in the queue directory. You may
+need to alter the permissions of these directories to provide access to the
+telegraf user.
+
+Unix permissions:
+```sh
+$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred}
+$ sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred}
+$ sudo usermod -a -G postdrop telegraf
+$ sudo chmod g+r /var/spool/postfix/maildrop
+```
+
+Posix ACL:
+```sh
+$ sudo setfacl -Rdm u:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop}
+```
+
 ### Measurements & Fields:
 
 - postfix_queue
@@ -127,6 +127,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
 		meas_name  string
 	)
 
+	p.AllColumns = nil
+
 	if p.Address == "" || p.Address == "localhost" {
 		p.Address = localhost
 	}
@@ -67,7 +67,7 @@ Measurement names are based on the Metric Family and tags are created for each
 label. The value is added to a field named based on the metric type.
 
 All metrics receive the `url` tag indicating the related URL specified in the
 Telegraf configuration. If using Kubernetes service discovery the `address`
 tag is also added indicating the discovered ip address.
 
 ### Example Output:
@@ -20,7 +20,7 @@ const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client
 
 type Prometheus struct {
 	// An array of urls to scrape metrics from.
-	Urls []string
+	URLs []string `toml:"urls"`
 
 	// An array of Kubernetes services to scrape metrics from.
 	KubernetesServices []string
@@ -73,12 +73,12 @@ func (p *Prometheus) Description() string {
 
 var ErrProtocolError = errors.New("prometheus protocol error")
 
-func (p *Prometheus) AddressToURL(u *url.URL, address string) string {
+func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
 	host := address
 	if u.Port() != "" {
 		host = address + ":" + u.Port()
 	}
-	reconstructedUrl := url.URL{
+	reconstructedURL := &url.URL{
 		Scheme: u.Scheme,
 		Opaque: u.Opaque,
 		User:   u.User,
@@ -89,36 +89,42 @@ func (p *Prometheus) AddressToURL(u *url.URL, address string) string {
 		Fragment: u.Fragment,
 		Host:     host,
 	}
-	return reconstructedUrl.String()
+	return reconstructedURL
 }
 
-type UrlAndAddress struct {
-	OriginalUrl string
-	Url         string
-	Address     string
+type URLAndAddress struct {
+	OriginalURL *url.URL
+	URL         *url.URL
+	Address     string
 }
 
-func (p *Prometheus) GetAllURLs() ([]UrlAndAddress, error) {
-	allUrls := make([]UrlAndAddress, 0)
-	for _, url := range p.Urls {
-		allUrls = append(allUrls, UrlAndAddress{Url: url, OriginalUrl: url})
+func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) {
+	allURLs := make([]URLAndAddress, 0)
+	for _, u := range p.URLs {
+		URL, err := url.Parse(u)
+		if err != nil {
+			log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err)
+			continue
+		}
+
+		allURLs = append(allURLs, URLAndAddress{URL: URL, OriginalURL: URL})
 	}
 	for _, service := range p.KubernetesServices {
-		u, err := url.Parse(service)
+		URL, err := url.Parse(service)
 		if err != nil {
 			return nil, err
 		}
-		resolvedAddresses, err := net.LookupHost(u.Hostname())
+		resolvedAddresses, err := net.LookupHost(URL.Hostname())
 		if err != nil {
-			log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", u.Host, err)
+			log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err)
 			continue
 		}
 		for _, resolved := range resolvedAddresses {
-			serviceUrl := p.AddressToURL(u, resolved)
-			allUrls = append(allUrls, UrlAndAddress{Url: serviceUrl, Address: resolved, OriginalUrl: service})
+			serviceURL := p.AddressToURL(URL, resolved)
+			allURLs = append(allURLs, URLAndAddress{URL: serviceURL, Address: resolved, OriginalURL: URL})
 		}
 	}
-	return allUrls, nil
+	return allURLs, nil
 }
 
 // Reads stats from all configured servers accumulates stats.
@@ -134,16 +140,16 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
 
 	var wg sync.WaitGroup
 
-	allUrls, err := p.GetAllURLs()
+	allURLs, err := p.GetAllURLs()
 	if err != nil {
 		return err
 	}
-	for _, url := range allUrls {
+	for _, URL := range allURLs {
 		wg.Add(1)
-		go func(serviceUrl UrlAndAddress) {
+		go func(serviceURL URLAndAddress) {
 			defer wg.Done()
-			acc.AddError(p.gatherURL(serviceUrl, acc))
-		}(url)
+			acc.AddError(p.gatherURL(serviceURL, acc))
+		}(URL)
 	}
 
 	wg.Wait()
@@ -178,8 +184,8 @@ func (p *Prometheus) createHttpClient() (*http.Client, error) {
 	return client, nil
 }
 
-func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) error {
-	var req, err = http.NewRequest("GET", url.Url, nil)
+func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
+	var req, err = http.NewRequest("GET", u.URL.String(), nil)
 	req.Header.Add("Accept", acceptHeader)
 	var token []byte
 	var resp *http.Response
@@ -194,11 +200,11 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) error {
 
 	resp, err = p.client.Do(req)
 	if err != nil {
-		return fmt.Errorf("error making HTTP request to %s: %s", url.Url, err)
+		return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("%s returned HTTP status %s", url.Url, resp.Status)
+		return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
 	}
 
 	body, err := ioutil.ReadAll(resp.Body)
@@ -209,14 +215,16 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) error {
 	metrics, err := Parse(body, resp.Header)
 	if err != nil {
 		return fmt.Errorf("error reading metrics for %s: %s",
-			url.Url, err)
+			u.URL, err)
 	}
 	// Add (or not) collected metrics
 	for _, metric := range metrics {
 		tags := metric.Tags()
-		tags["url"] = url.OriginalUrl
-		if url.Address != "" {
-			tags["address"] = url.Address
+		// strip user and password from URL
+		u.OriginalURL.User = nil
+		tags["url"] = u.OriginalURL.String()
+		if u.Address != "" {
+			tags["address"] = u.Address
 		}
 
 		switch metric.Type() {
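Storing parsed `*url.URL` values instead of raw strings is what makes the userinfo fix possible: before tagging, the input clears the URL's `User` field so credentials embedded in the configured address never reach the `url` tag (changelog entry #3739). The stripping step in isolation:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("http://user:secret@example.com:9100/metrics")
	if err != nil {
		panic(err)
	}
	u.User = nil // strip userinfo before using the URL as a tag value
	fmt.Println(u.String()) // http://example.com:9100/metrics
}
```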
@@ -37,7 +37,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
 	defer ts.Close()
 
 	p := &Prometheus{
-		Urls: []string{ts.URL},
+		URLs: []string{ts.URL},
 	}
 
 	var acc testutil.Accumulator
@@ -89,7 +89,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) {
 	defer ts.Close()
 
 	p := &Prometheus{
-		Urls: []string{ts.URL},
+		URLs: []string{ts.URL},
 		KubernetesServices: []string{"http://random.telegraf.local:88/metrics"},
 	}
 
@@ -72,7 +72,7 @@ type MessageStats struct {
 	AckDetails        Details `json:"ack_details"`
 	Deliver           int64
 	DeliverDetails    Details `json:"deliver_details"`
-	DeliverGet        int64
+	DeliverGet        int64   `json:"deliver_get"`
 	DeliverGetDetails Details `json:"deliver_get_details"`
 	Publish           int64
 	PublishDetails    Details `json:"publish_details"`
@@ -88,6 +88,7 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
 **Replication**
 - connected_slaves(int, number)
 - master_repl_offset(int, number)
+- second_repl_offset(int, number)
 - repl_backlog_active(int, number)
 - repl_backlog_size(int, bytes)
 - repl_backlog_first_byte_offset(int, number)
@@ -189,6 +189,10 @@ func gatherInfoOutput(
 			}
 		}
 
+		if strings.HasPrefix(name, "master_replid") {
+			continue
+		}
+
 		if name == "mem_allocator" {
 			continue
 		}
@@ -86,6 +86,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
 		"repl_backlog_size":              int64(1048576),
 		"repl_backlog_first_byte_offset": int64(0),
 		"repl_backlog_histlen":           int64(0),
+		"second_repl_offset":             int64(-1),
 		"used_cpu_sys":                   float64(0.14),
 		"used_cpu_user":                  float64(0.05),
 		"used_cpu_sys_children":          float64(0.00),
@@ -189,7 +190,10 @@ latest_fork_usec:0
 # Replication
 role:master
 connected_slaves:0
+master_replid:8c4d7b768b26826825ceb20ff4a2c7c54616350b
+master_replid2:0000000000000000000000000000000000000000
 master_repl_offset:0
+second_repl_offset:-1
 repl_backlog_active:0
 repl_backlog_size:1048576
 repl_backlog_first_byte_offset:0
@@ -129,7 +129,7 @@ the configuration to execute that.
 
 Example output from an _Apple SSD_:
 ```
-> smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=/dev/rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000
+> smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000
-> smart_attribute,device=/dev/rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000
+> smart_attribute,device=rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000
-> smart_device,enabled=Enabled,host=mbpro.local,device=/dev/rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
+> smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
 ```
@@ -3,6 +3,7 @@ package smart
 import (
 	"fmt"
 	"os/exec"
+	"path"
 	"regexp"
 	"strconv"
 	"strings"
@@ -134,7 +135,7 @@ func (m *Smart) scan() ([]string, error) {
 
 	devices := []string{}
 	for _, line := range strings.Split(string(out), "\n") {
-		dev := strings.Split(line, "#")
+		dev := strings.Split(line, " ")
 		if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) {
 			devices = append(devices, strings.TrimSpace(dev[0]))
 		}
@@ -178,13 +179,13 @@ func exitStatus(err error) (int, error) {
 	return 0, err
 }
 
-func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockeck, device string, wg *sync.WaitGroup) {
+func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, nockeck, device string, wg *sync.WaitGroup) {
 
 	defer wg.Done()
 	// smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n
 	args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"}
 	args = append(args, strings.Split(device, " ")...)
-	cmd := sudo(usesudo, path, args...)
+	cmd := sudo(usesudo, smartctl, args...)
 	out, e := internal.CombinedOutputTimeout(cmd, time.Second*5)
 	outStr := string(out)
 
@@ -196,7 +197,8 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockec
 	}
 
 	device_tags := map[string]string{}
-	device_tags["device"] = strings.Split(device, " ")[0]
+	device_node := strings.Split(device, " ")[0]
+	device_tags["device"] = path.Base(device_node)
 	device_fields := make(map[string]interface{})
 	device_fields["exit_status"] = exitStatus
 
@@ -240,7 +242,8 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockec
 			tags := map[string]string{}
 			fields := make(map[string]interface{})
 
-			tags["device"] = strings.Split(device, " ")[0]
+			device_node := strings.Split(device, " ")[0]
+			tags["device"] = path.Base(device_node)
 
 			if serial, ok := device_tags["serial_no"]; ok {
 				tags["serial_no"] = serial
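Note that the previous parameter name `path` shadowed the `path` package, which is why it is renamed to `smartctl` alongside the new import. With `path.Base`, the `device` tag carries only the node name, matching the README's updated example output (`device=rdisk0` rather than `device=/dev/rdisk0`). For instance:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	device := "/dev/rdisk0 -d ata" // device node plus extra smartctl arguments
	node := strings.Split(device, " ")[0]
	fmt.Println(path.Base(node)) // rdisk0
}
```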
@@ -89,7 +89,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "1",
@@ -107,7 +107,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "5",
@@ -125,7 +125,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "9",
@@ -143,7 +143,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "12",
@@ -161,7 +161,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "169",
@@ -179,7 +179,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "173",
@@ -197,7 +197,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "190",
@@ -215,7 +215,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "192",
@@ -233,7 +233,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "194",
@@ -251,7 +251,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "197",
@@ -269,7 +269,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "199",
@@ -287,7 +287,7 @@ func TestGatherAttributes(t *testing.T) {
 					"exit_status": int(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
 					"id":        "240",
@@ -317,7 +317,7 @@ func TestGatherAttributes(t *testing.T) {
 					"udma_crc_errors": int64(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"model":     "APPLE SSD SM256E",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
@@ -363,7 +363,7 @@ func TestGatherNoAttributes(t *testing.T) {
 					"udma_crc_errors": int64(0),
 				},
 				map[string]string{
-					"device":    "/dev/ada0",
+					"device":    "ada0",
 					"model":     "APPLE SSD SM256E",
 					"serial_no": "S0X5NZBC422720",
 					"wwn":       "5002538043584d30",
@@ -246,6 +246,9 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo
 // Add core metrics section to accumulator
 func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 	var coreMetrics map[string]Core
+	if len(mBeansData.SolrMbeans) < 2 {
+		return fmt.Errorf("no core metric data to unmarshall")
+	}
 	if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil {
 		return err
 	}

@@ -274,9 +277,14 @@ func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBea
 func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 	var queryMetrics map[string]QueryHandler
+
+	if len(mBeansData.SolrMbeans) < 4 {
+		return fmt.Errorf("no query handler metric data to unmarshall")
+	}
+
 	if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil {
 		return err
 	}

 	for name, metrics := range queryMetrics {
 		coreFields := map[string]interface{}{
 			"15min_rate_reqs_per_second": metrics.Stats.One5minRateReqsPerSecond,

@@ -310,6 +318,9 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
 func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 	var updateMetrics map[string]UpdateHandler

+	if len(mBeansData.SolrMbeans) < 6 {
+		return fmt.Errorf("no update handler metric data to unmarshall")
+	}
 	if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil {
 		return err
 	}

@@ -364,6 +375,9 @@ func getFloat(unk interface{}) float64 {

 // Add cache metrics section to accumulator
 func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
+	if len(mBeansData.SolrMbeans) < 8 {
+		return fmt.Errorf("no cache metric data to unmarshall")
+	}
 	var cacheMetrics map[string]Cache
 	if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil {
 		return err
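Each handler reads a fixed index of `SolrMbeans`, so every one now guards the slice length before indexing. A minimal sketch of the pattern in isolation (`decodeAt` and its types are simplified stand-ins, not the plugin's code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeAt checks the slice length before indexing, mirroring the
// guards added above, instead of panicking with index out of range.
func decodeAt(mbeans []json.RawMessage, idx int, out interface{}) error {
	if len(mbeans) <= idx {
		return fmt.Errorf("no metric data to unmarshal at index %d", idx)
	}
	return json.Unmarshal(mbeans[idx], out)
}

func main() {
	var m map[string]int
	err := decodeAt([]json.RawMessage{}, 1, &m)
	fmt.Println(err) // no metric data to unmarshal at index 1
}
```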
@@ -60,3 +60,44 @@ func createMockServer() *httptest.Server {
 		}
 	}))
 }
+
+func TestNoCoreDataHandling(t *testing.T) {
+	ts := createMockNoCoreDataServer()
+	solr := NewSolr()
+	solr.Servers = []string{ts.URL}
+	var acc testutil.Accumulator
+	require.NoError(t, solr.Gather(&acc))
+
+	acc.AssertContainsTaggedFields(t, "solr_admin",
+		solrAdminMainCoreStatusExpected,
+		map[string]string{"core": "main"})
+
+	acc.AssertContainsTaggedFields(t, "solr_admin",
+		solrAdminCore1StatusExpected,
+		map[string]string{"core": "core1"})
+
+	acc.AssertDoesNotContainMeasurement(t, "solr_core")
+	acc.AssertDoesNotContainMeasurement(t, "solr_queryhandler")
+	acc.AssertDoesNotContainMeasurement(t, "solr_updatehandler")
+	acc.AssertDoesNotContainMeasurement(t, "solr_handler")
+}
+
+func createMockNoCoreDataServer() *httptest.Server {
+	var nodata string
+	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if strings.Contains(r.URL.Path, "/solr/admin/cores") {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprintln(w, statusResponse)
+		} else if strings.Contains(r.URL.Path, "solr/main/admin") {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprintln(w, nodata)
+		} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprintln(w, nodata)
+		} else {
+			w.WriteHeader(http.StatusNotFound)
+			fmt.Fprintln(w, "nope")
+		}
+	}))
+}
@@ -1116,30 +1116,30 @@ DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetwee

 DECLARE @w1 TABLE
 (
-	WaitType nvarchar(64) NOT NULL,
+	WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
 	WaitTimeInMs bigint NOT NULL,
 	WaitTaskCount bigint NOT NULL,
 	CollectionDate datetime NOT NULL
 )
 DECLARE @w2 TABLE
 (
-	WaitType nvarchar(64) NOT NULL,
+	WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
 	WaitTimeInMs bigint NOT NULL,
 	WaitTaskCount bigint NOT NULL,
 	CollectionDate datetime NOT NULL
 )
 DECLARE @w3 TABLE
 (
-	WaitType nvarchar(64) NOT NULL
+	WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
 )
 DECLARE @w4 TABLE
 (
-	WaitType nvarchar(64) NOT NULL,
-	WaitCategory nvarchar(64) NOT NULL
+	WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
+	WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
 )
 DECLARE @w5 TABLE
 (
-	WaitCategory nvarchar(64) NOT NULL,
+	WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
 	WaitTimeInMs bigint NOT NULL,
 	WaitTaskCount bigint NOT NULL
 )

@@ -1380,12 +1380,12 @@ INSERT @w4 (WaitType, WaitCategory) VALUES ('ABR', 'OTHER') ,

 INSERT @w1 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
 SELECT
-	WaitType = wait_type
+	WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
 	, WaitTimeInMs = SUM(wait_time_ms)
 	, WaitTaskCount = SUM(waiting_tasks_count)
 	, CollectionDate = GETDATE()
 FROM sys.dm_os_wait_stats
-WHERE [wait_type] NOT IN
+WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
 (
 	SELECT WaitType FROM @w3
 )

@@ -1396,12 +1396,12 @@ WAITFOR DELAY @delayInterval;

 INSERT @w2 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
 SELECT
-	WaitType = wait_type
+	WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
 	, WaitTimeInMs = SUM(wait_time_ms)
 	, WaitTaskCount = SUM(waiting_tasks_count)
 	, CollectionDate = GETDATE()
 FROM sys.dm_os_wait_stats
-WHERE [wait_type] NOT IN
+WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
 (
 	SELECT WaitType FROM @w3
 )
@@ -30,7 +30,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info
 - zombie
 - dead
 - wait (freebsd only)
-- idle (bsd only)
+- idle (bsd and Linux 4+ only)
 - paging (linux only)
 - total_threads (linux only)

@@ -47,7 +47,7 @@ Linux FreeBSD Darwin meaning
 Z      Z       Z      zombie
 X      none    none   dead
 T      T       T      stopped
-none   I       I      idle (sleeping for longer than about 20 seconds)
+I      I       I      idle (sleeping for longer than about 20 seconds)
 D      D,L     U      blocked (waiting in uninterruptible sleep, or locked)
 W      W       none   paging (linux kernel < 2.6 only), wait (freebsd)
 ```
@@ -34,5 +34,6 @@ $ telegraf --config ~/ws/telegraf.conf --input-filter system --test
 * Plugin: system, Collection 1
 * Plugin: inputs.system, Collection 1
 > system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000
-> system,host=tyrion uptime=1249632i,uptime_format="14 days, 11:07" 1483964144000000000
+> system,host=tyrion uptime=1249632i 1483964144000000000
+> system,host=tyrion uptime_format="14 days, 11:07" 1483964144000000000
 ```
@@ -117,6 +117,140 @@ func TestDiskUsage(t *testing.T) {
 	assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields())
 }

+func TestDiskUsageHostMountPrefix(t *testing.T) {
+	tests := []struct {
+		name            string
+		partitionStats  []disk.PartitionStat
+		usageStats      []*disk.UsageStat
+		hostMountPrefix string
+		expectedTags    map[string]string
+		expectedFields  map[string]interface{}
+	}{
+		{
+			name: "no host mount prefix",
+			partitionStats: []disk.PartitionStat{
+				{
+					Device:     "/dev/sda",
+					Mountpoint: "/",
+					Fstype:     "ext4",
+					Opts:       "ro",
+				},
+			},
+			usageStats: []*disk.UsageStat{
+				&disk.UsageStat{
+					Path:  "/",
+					Total: 42,
+				},
+			},
+			expectedTags: map[string]string{
+				"path":   "/",
+				"device": "sda",
+				"fstype": "ext4",
+				"mode":   "ro",
+			},
+			expectedFields: map[string]interface{}{
+				"total":        uint64(42),
+				"used":         uint64(0),
+				"free":         uint64(0),
+				"inodes_total": uint64(0),
+				"inodes_free":  uint64(0),
+				"inodes_used":  uint64(0),
+				"used_percent": float64(0),
+			},
+		},
+		{
+			name: "host mount prefix",
+			partitionStats: []disk.PartitionStat{
+				{
+					Device:     "/dev/sda",
+					Mountpoint: "/hostfs/var",
+					Fstype:     "ext4",
+					Opts:       "ro",
+				},
+			},
+			usageStats: []*disk.UsageStat{
+				&disk.UsageStat{
+					Path:  "/hostfs/var",
+					Total: 42,
+				},
+			},
+			hostMountPrefix: "/hostfs",
+			expectedTags: map[string]string{
+				"path":   "/var",
+				"device": "sda",
+				"fstype": "ext4",
+				"mode":   "ro",
+			},
+			expectedFields: map[string]interface{}{
+				"total":        uint64(42),
+				"used":         uint64(0),
+				"free":         uint64(0),
+				"inodes_total": uint64(0),
+				"inodes_free":  uint64(0),
+				"inodes_used":  uint64(0),
+				"used_percent": float64(0),
+			},
+		},
+		{
+			name: "host mount prefix exact match",
+			partitionStats: []disk.PartitionStat{
+				{
+					Device:     "/dev/sda",
+					Mountpoint: "/hostfs",
+					Fstype:     "ext4",
+					Opts:       "ro",
+				},
+			},
+			usageStats: []*disk.UsageStat{
+				&disk.UsageStat{
+					Path:  "/hostfs",
+					Total: 42,
+				},
+			},
+			hostMountPrefix: "/hostfs",
+			expectedTags: map[string]string{
+				"path":   "/",
+				"device": "sda",
+				"fstype": "ext4",
+				"mode":   "ro",
+			},
+			expectedFields: map[string]interface{}{
+				"total":        uint64(42),
+				"used":         uint64(0),
+				"free":         uint64(0),
+				"inodes_total": uint64(0),
+				"inodes_free":  uint64(0),
+				"inodes_used":  uint64(0),
+				"used_percent": float64(0),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			mck := &mock.Mock{}
+			mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck}
+			defer mps.AssertExpectations(t)
+
+			var acc testutil.Accumulator
+			var err error
+
+			mps.On("Partitions", true).Return(tt.partitionStats, nil)
+
+			for _, v := range tt.usageStats {
+				mps.On("PSDiskUsage", v.Path).Return(v, nil)
+			}
+
+			mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return(tt.hostMountPrefix)
+
+			err = (&DiskStats{ps: mps}).Gather(&acc)
+			require.NoError(t, err)
+
+			acc.AssertContainsTaggedFields(t, "disk", tt.expectedFields, tt.expectedTags)
+		})
+	}
+}

 func TestDiskStats(t *testing.T) {
 	var mps MockPS
 	defer mps.AssertExpectations(t)
@@ -85,6 +85,7 @@ func getEmptyFields() map[string]interface{} {
 		fields["dead"] = int64(0)
 		fields["paging"] = int64(0)
 		fields["total_threads"] = int64(0)
+		fields["idle"] = int64(0)
 	}
 	return fields
 }

@@ -174,6 +175,8 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
 			fields["stopped"] = fields["stopped"].(int64) + int64(1)
 		case 'W':
 			fields["paging"] = fields["paging"].(int64) + int64(1)
+		case 'I':
+			fields["idle"] = fields["idle"].(int64) + int64(1)
 		default:
 			log.Printf("I! processes: Unknown state [ %s ] in file %s",
 				string(stats[0][0]), filename)
@@ -44,7 +44,8 @@ func TestFromPS(t *testing.T) {
 	fields["zombies"] = int64(1)
 	fields["running"] = int64(4)
 	fields["sleeping"] = int64(34)
-	fields["total"] = int64(43)
+	fields["idle"] = int64(2)
+	fields["total"] = int64(45)

 	acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
 }

@@ -172,6 +173,8 @@ U
 Z
 D
 S+
+I
+I
 `

 const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
@@ -2,6 +2,7 @@ package system

 import (
 	"os"
+	"path/filepath"
 	"strings"

 	"github.com/influxdata/telegraf"

@@ -129,7 +130,7 @@ func (s *systemPS) DiskUsage(
 			continue
 		}

-		du.Path = strings.TrimPrefix(p.Mountpoint, hostMountPrefix)
+		du.Path = filepath.Join("/", strings.TrimPrefix(p.Mountpoint, hostMountPrefix))
 		du.Fstype = p.Fstype
 		usage = append(usage, du)
 		partitions = append(partitions, &p)
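Wrapping the trimmed mountpoint in `filepath.Join("/", ...)` cleans the result, so a mountpoint that exactly equals `HOST_MOUNT_PREFIX` yields the tag `/` instead of an empty string. The behavior in isolation:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	const prefix = "/hostfs"
	for _, mount := range []string{"/hostfs/var", "/hostfs"} {
		trimmed := strings.TrimPrefix(mount, prefix)
		// Joining onto "/" turns the empty string into "/" and
		// leaves "/var" unchanged.
		fmt.Println(filepath.Join("/", trimmed))
	}
	// Output:
	// /var
	// /
}
```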
@@ -46,7 +46,9 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
 		"n_cpus": runtime.NumCPU(),
 	}, nil)
 	acc.AddCounter("system", map[string]interface{}{
 		"uptime": hostinfo.Uptime,
+	}, nil)
+	acc.AddFields("system", map[string]interface{}{
 		"uptime_format": format_uptime(hostinfo.Uptime),
 	}, nil)
@@ -284,7 +284,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
 ```


-### .NET Montioring
+### .NET Monitoring
 ```
 [[inputs.win_perf_counters.object]]
   # .NET CLR Exceptions, in this case for IIS only
@@ -9,7 +9,6 @@ import (

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
-	"github.com/aws/aws-sdk-go/service/sts"

 	"github.com/influxdata/telegraf"
 	internalaws "github.com/influxdata/telegraf/internal/config/aws"

@@ -71,20 +70,7 @@ func (c *CloudWatch) Connect() error {
 		Token: c.Token,
 	}
 	configProvider := credentialConfig.Credentials()
-
-	stsService := sts.New(configProvider)
-
-	params := &sts.GetSessionTokenInput{}
-
-	_, err := stsService.GetSessionToken(params)
-
-	if err != nil {
-		log.Printf("E! cloudwatch: Cannot use credentials to connect to AWS : %+v \n", err.Error())
-		return err
-	}
-
 	c.svc = cloudwatch.New(configProvider)

 	return nil
 }
@@ -19,6 +19,10 @@ func TestConnectAndWrite(t *testing.T) {
 		t.Skip("Skipping integration test in short mode")
 	}

+	if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
+		t.Skip("Skipping test on CircleCI due to docker failures")
+	}
+
 	url := testURL()
 	table := "test"

@@ -95,6 +99,10 @@ func Test_escapeValue(t *testing.T) {
 		t.Skip("Skipping integration test in short mode")
 	}

+	if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
+		t.Skip("Skipping test on CircleCI due to docker failures")
+	}
+
 	tests := []struct {
 		Val  interface{}
 		Want string
@@ -155,8 +155,22 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 		batch = append(batch, buf...)
 	}

+	err = g.send(batch)
+
+	// try to reconnect and retry to send
+	if err != nil {
+		log.Println("E! Graphite: Reconnecting and retrying: ")
+		g.Connect()
+		err = g.send(batch)
+	}
+
+	return err
+}
+
+func (g *Graphite) send(batch []byte) error {
 	// This will get set to nil if a successful write occurs
-	err = errors.New("Could not write to any Graphite server in cluster\n")
+	err := errors.New("Could not write to any Graphite server in cluster\n")

 	// Send data to a random server
 	p := rand.Perm(len(g.conns))
 	for _, n := range p {

@@ -167,6 +181,8 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 		if _, e := g.conns[n].Write(batch); e != nil {
 			// Error
 			log.Println("E! Graphite Error: " + e.Error())
+			// Close explicitely
+			g.conns[n].Close()
 			// Let's try the next one
 		} else {
 			// Success

@@ -174,11 +190,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
 			break
 		}
 	}
-	// try to reconnect
-	if err != nil {
-		log.Println("E! Reconnecting: ")
-		g.Connect()
-	}
 	return err
 }
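The refactor moves the actual socket writes into a `send` helper so the write path becomes: send once, and on failure reconnect and retry once before reporting the error. A minimal sketch of that control flow under the same assumptions (the `send`/`reconnect` closures stand in for the plugin's own methods):

```go
package main

import (
	"errors"
	"fmt"
)

// writeWithRetry mirrors the retry-once-after-reconnect flow above:
// a failed send triggers one reconnect and one more attempt.
func writeWithRetry(send func() error, reconnect func()) error {
	err := send()
	if err != nil {
		reconnect()
		err = send()
	}
	return err
}

func main() {
	attempts := 0
	send := func() error {
		attempts++
		if attempts == 1 {
			return errors.New("broken pipe") // first attempt hits a dead connection
		}
		return nil
	}
	fmt.Println(writeWithRetry(send, func() { fmt.Println("reconnecting") }))
}
```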
@@ -81,7 +81,7 @@ func TestGraphiteOK(t *testing.T) {
 	err2 := g.Write(metrics)
 	require.NoError(t, err2)

-	// Waiting TCPserver
+	// Waiting TCPserver, should reconnect and resend
 	wg.Wait()
 	t.Log("Finished Waiting for first data")
 	var wg2 sync.WaitGroup

@@ -89,10 +89,8 @@ func TestGraphiteOK(t *testing.T) {
 	wg2.Add(1)
 	TCPServer2(t, &wg2)
 	//Write but expect an error, but reconnect
-	g.Write(metrics2)
 	err3 := g.Write(metrics2)
-	t.Log("Finished writing second data, it should have failed")
-	//Actually write the new metrics
+	t.Log("Finished writing second data, it should have reconnected automatically")

 	require.NoError(t, err3)
 	t.Log("Finished writing third data")
@@ -211,11 +211,12 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
 		return nil, err
 	}

+	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
+
 	for header, value := range c.config.HTTPHeaders {
 		req.Header.Set(header, value)
 	}

-	req.Header.Set("Content-Type", "text/plain")
 	req.Header.Set("User-Agent", c.config.UserAgent)
 	if c.config.Username != "" && c.config.Password != "" {
 		req.SetBasicAuth(c.config.Username, c.config.Password)
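Setting the default `Content-Type` before the `HTTPHeaders` loop matters because `http.Header.Set` replaces any existing value: a user-configured header applied afterwards now overrides the default rather than being clobbered by it. The ordering effect in isolation:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain; charset=utf-8") // default applied first
	// A user-supplied header applied afterwards replaces the default.
	h.Set("Content-Type", "application/x-custom")
	fmt.Println(h.Get("Content-Type")) // application/x-custom
}
```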
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"
 	"sync"
+	"time"

 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"

@@ -25,6 +26,9 @@ var sampleConfig = `
   # username = "telegraf"
   # password = "metricsmetricsmetricsmetrics"

+  ## Timeout for write operations. default: 5s
+  # timeout = "5s"
+
   ## client ID, if not set a random ID is generated
   # client_id = ""

@@ -149,7 +153,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {

 func (m *MQTT) publish(topic string, body []byte) error {
 	token := m.client.Publish(topic, byte(m.QoS), false, body)
-	token.Wait()
+	token.WaitTimeout(m.Timeout.Duration)
 	if token.Error() != nil {
 		return token.Error()
 	}

@@ -158,6 +162,12 @@ func (m *MQTT) publish(topic string, body []byte) error {

 func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
 	opts := paho.NewClientOptions()
+	opts.KeepAlive = 0 * time.Second
+
+	if m.Timeout.Duration < time.Second {
+		m.Timeout.Duration = 5 * time.Second
+	}
+	opts.WriteTimeout = m.Timeout.Duration

 	if m.ClientID != "" {
 		opts.SetClientID(m.ClientID)
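The key change is replacing an unbounded `token.Wait()` with a bounded `WaitTimeout`, so a publish that never gets acknowledged cannot block the output forever. A sketch of the difference using plain channels rather than the paho API (the channel setup is illustrative, not the library's internals):

```go
package main

import (
	"fmt"
	"time"
)

// waitTimeout waits for done or gives up after d, mirroring the
// bounded token.WaitTimeout call above.
func waitTimeout(done <-chan struct{}, d time.Duration) bool {
	select {
	case <-done:
		return true
	case <-time.After(d):
		return false
	}
}

func main() {
	done := make(chan struct{}) // never closed: stands in for a stuck broker ack
	// An unbounded <-done here would block forever; the bounded wait
	// returns so the plugin can surface an error and move on.
	fmt.Println(waitTimeout(done, 100*time.Millisecond)) // false
}
```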
@@ -79,19 +79,28 @@ var sampleConfig = `
 `

 func (p *PrometheusClient) Start() error {
-	prometheus.Register(p)
+	defaultCollectors := map[string]bool{
+		"gocollector": true,
+		"process":     true,
+	}
 	for _, collector := range p.CollectorsExclude {
+		delete(defaultCollectors, collector)
+	}
+
+	registry := prometheus.NewRegistry()
+	for collector, _ := range defaultCollectors {
 		switch collector {
 		case "gocollector":
-			prometheus.Unregister(prometheus.NewGoCollector())
+			registry.Register(prometheus.NewGoCollector())
 		case "process":
-			prometheus.Unregister(prometheus.NewProcessCollector(os.Getpid(), ""))
+			registry.Register(prometheus.NewProcessCollector(os.Getpid(), ""))
 		default:
 			return fmt.Errorf("unrecognized collector %s", collector)
 		}
 	}

+	registry.Register(p)
+
 	if p.Listen == "" {
 		p.Listen = "localhost:9273"
 	}

@@ -102,8 +111,7 @@ func (p *PrometheusClient) Start() error {

 	mux := http.NewServeMux()
 	mux.Handle(p.Path, promhttp.HandlerFor(
-		prometheus.DefaultGatherer,
-		promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))
+		registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))

 	p.server = &http.Server{
 		Addr: p.Listen,

@@ -631,7 +631,7 @@ func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error)
 	time.Sleep(time.Millisecond * 200)

 	p := &prometheus_input.Prometheus{
-		Urls: []string{"http://localhost:9127/metrics"},
+		URLs: []string{"http://localhost:9127/metrics"},
 	}

 	return pTesting, p, nil
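The rewrite switches from mutating the package-level default registry (register, then unregister excluded collectors) to building a private `prometheus.NewRegistry()` that contains only the wanted collectors and serving that registry directly. A minimal standalone sketch of the same client_golang pattern (address and collector choice are illustrative):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A private registry instead of the package-level default, so
	// optional collectors can be included or left out per instance.
	registry := prometheus.NewRegistry()
	registry.MustRegister(prometheus.NewGoCollector())

	// Serve exactly this registry, not prometheus.DefaultGatherer.
	http.Handle("/metrics", promhttp.HandlerFor(
		registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))
	http.ListenAndServe("localhost:9273", nil)
}
```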
@@ -11,6 +11,7 @@ import (

 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/outputs"
+	"time"
 )

 type Wavefront struct {

@@ -120,6 +121,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error {
 		return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error())
 	}
 	defer connection.Close()
+	connection.SetWriteDeadline(time.Now().Add(5 * time.Second))

 	for _, m := range metrics {
 		for _, metricPoint := range buildMetrics(m, w) {
@@ -138,8 +138,11 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {

 		// Trim the buffer, even though there should be no padding
 		line := strings.TrimSpace(string(buf))
-		metric, err := p.ParseLine(line)
+		if line == "" {
+			continue
+		}
+
+		metric, err := p.ParseLine(line)
 		if err == nil {
 			metrics = append(metrics, metric)
 		} else {
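With the guard in place, blank lines in a plaintext batch are skipped instead of being handed to `ParseLine` and reported as malformed metrics. The same trim-and-skip pattern in isolation (the sample input is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	buf := "cpu.load 0.5 1483964144\n\ncpu.idle 99 1483964144\n"
	for _, raw := range strings.Split(buf, "\n") {
		line := strings.TrimSpace(raw)
		if line == "" {
			continue // ignore empty lines instead of emitting a parse error
		}
		fmt.Println("parse:", line)
	}
}
```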
@@ -133,7 +133,7 @@ func InsertField(bucket, fieldName string) string {
 	if fieldName == "value" {
 		return fieldDeleter.Replace(bucket)
 	}
-	return strings.Replace(bucket, "FIELDNAME", strings.Replace(fieldName, ".", "_", -1), 1)
+	return strings.Replace(bucket, "FIELDNAME", fieldName, 1)
 }

 func buildTags(tags map[string]string) string {