Compare commits

50 commits:

| SHA1 |
|---|
| 67440c95bb |
| 39de63d03c |
| 56edd339e7 |
| df768f83af |
| 8733d3826a |
| 2bb97154db |
| a8d9e458ab |
| b464adb08c |
| 4bd67824ae |
| f5894a6a2f |
| 1790b26651 |
| bb3ee1fd39 |
| 82df5bf2d8 |
| 8b566b2b9f |
| 059a751a71 |
| dcaa0ca8db |
| 8777e32d9f |
| 667940afac |
| 0605af7c99 |
| 4e89c17c0f |
| 45b7db7de1 |
| cc478f035e |
| fe6239cf9f |
| 865917f523 |
| 4aa8d72644 |
| 384ef6af6b |
| 07985e6524 |
| f8597f78f4 |
| 83faea7a31 |
| 223bbf0df7 |
| 55f35f291d |
| 6852231c1b |
| ce4ca43a5d |
| 5d6622eb44 |
| a1668bbf9a |
| fe91c779e9 |
| 425b6f7d63 |
| c322ddb4b0 |
| 648d3bde33 |
| d8da77cb42 |
| fdb04702eb |
| ecf43f4986 |
| e307e92e86 |
| 8d4a09c3ea |
| fd964bd4eb |
| 994e75f1f0 |
| 2e2efafbfc |
| 39537ed86e |
| 558ce25c94 |
| 0438f412a9 |
CHANGELOG.md (33 changed lines)
@@ -1,4 +1,29 @@
## v1.5 [unreleased]
## v1.5.2 [2018-01-30]

### Bugfixes

- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.

## v1.5.1 [2018-01-10]

### Bugfixes

- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.

## v1.5 [2017-12-14]

### New Plugins
- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno

@@ -9,7 +34,7 @@
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
- [pf](./plugins/inputs/pf/README.md) Thanks to @nferch
- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello

@@ -78,6 +103,7 @@
- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.

### Bugfixes

@@ -92,6 +118,9 @@
- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.

## v1.4.5 [2017-12-01]
Godeps (4 changed lines)
@@ -4,7 +4,7 @@ github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster ccdc0803695fbce22f1706d04ded46cd518fd832
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29

@@ -65,7 +65,7 @@ github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil 384a55110aa5ae052eb93ea94940548c1e305a99
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama c01858abb625b73a3af51d0798e4ad42c8147093
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
Makefile (8 changed lines)
@@ -88,7 +88,7 @@ docker-run:
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate crate \
-d crate:2.2 \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false

@@ -116,12 +116,6 @@ docker-run-circle:
-e SLAPD_CONFIG_ROOTPW="secret" \
-p "389:389" -p "636:636" \
-d cobaugh/openldap-alpine
docker run --name cratedb \
-p "6543:5432" \
-d crate crate \
-Cnetwork.host=0.0.0.0 \
-Ctransport.host=localhost \
-Clicense.enterprise=false

docker-kill:
-docker kill aerospike elasticsearch kafka memcached mqtt mysql nats nsq \
@@ -308,7 +308,13 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
outMetricC <- m
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
}
}
}
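The flusher hunk above fans each metric out to every configured output, copying it for all but the last output so no two outputs share a mutable metric. A minimal sketch of that pattern, using stand-in `Metric` and `Output` types rather than Telegraf's real interfaces:

```go
package main

import "fmt"

// Metric and Output are stand-ins for telegraf.Metric and the running
// output; only the copy-on-fanout idea from the hunk above is illustrated.
type Metric struct{ Fields map[string]interface{} }

func (m *Metric) Copy() *Metric {
	fields := make(map[string]interface{}, len(m.Fields))
	for k, v := range m.Fields {
		fields[k] = v
	}
	return &Metric{Fields: fields}
}

type Output struct{ Queue []*Metric }

func (o *Output) AddMetric(m *Metric) { o.Queue = append(o.Queue, m) }

func main() {
	outputs := []*Output{{}, {}, {}}
	m := &Metric{Fields: map[string]interface{}{"value": 42}}

	// Give every output except the last its own copy, so one output
	// serializing or mutating the metric cannot affect the others; the
	// last output can take the original without an extra allocation.
	for i, o := range outputs {
		if i == len(outputs)-1 {
			o.AddMetric(m)
		} else {
			o.AddMetric(m.Copy())
		}
	}
	fmt.Println(len(outputs[0].Queue), len(outputs[2].Queue))
}
```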
@@ -364,8 +370,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)

now := time.Now()

// Start all ServicePlugins
for _, input := range a.Config.Inputs {
input.SetDefaultTags(a.Config.Tags)

@@ -406,7 +410,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, now, shutdown)
agg.Run(acc, shutdown)
}(aggregator)
}
@@ -1661,7 +1661,7 @@
# # insecure_skip_verify = false
#
# ## Add metrics to read
# [[inputs.jolokia2.metric]]
# [[inputs.jolokia2_agent.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]
@@ -40,6 +40,11 @@ var (
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)

envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)

// Config specifies the URL/user/password for the database that telegraf

@@ -689,6 +694,11 @@ func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}

// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}

// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.

@@ -702,8 +712,9 @@ func parseFile(fpath string) (*ast.Table, error) {
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
if env_val != "" {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
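The config hunks above escape environment-variable values before splicing them into the TOML source and switch from `os.Getenv` to `os.LookupEnv` so an empty-but-set variable is still substituted. A small sketch of that substitution step; the regex and replacer mirror the diff, everything else is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"regexp"
	"strings"
)

var (
	envVarRe      = regexp.MustCompile(`\$\w+`)
	envVarEscaper = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
)

func substituteEnvVars(contents []byte) []byte {
	for _, envVar := range envVarRe.FindAll(contents, -1) {
		// LookupEnv distinguishes "unset" from "set to empty string".
		val, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
		if ok {
			val = envVarEscaper.Replace(val) // keep the TOML string valid
			contents = bytes.Replace(contents, envVar, []byte(val), 1)
		}
	}
	return contents
}

func main() {
	os.Setenv("PASSWORD", `p\a"ss`)
	fmt.Println(string(substituteEnvVars([]byte(`password = "$PASSWORD"`))))
}
```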
@@ -114,7 +114,6 @@ func (r *RunningAggregator) reset() {
// for period ticks to tell it when to push and reset the aggregator.
func (r *RunningAggregator) Run(
acc telegraf.Accumulator,
now time.Time,
shutdown chan struct{},
) {
// The start of the period is truncated to the nearest second.

@@ -133,6 +132,7 @@ func (r *RunningAggregator) Run(
// 2nd interval: 00:10 - 00:20.5
// etc.
//
now := time.Now()
r.periodStart = now.Truncate(time.Second)
truncation := now.Sub(r.periodStart)
r.periodEnd = r.periodStart.Add(r.Config.Period)
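With the `now` parameter removed, the aggregator appears to anchor its window on the time `Run` actually starts, truncated to the second, so the period lines up with the internal ticker rather than with an older timestamp handed in by the caller. A tiny sketch of that window calculation (an illustrative helper, not the plugin's real code):

```go
package main

import (
	"fmt"
	"time"
)

// alignedPeriod computes an aggregation window the way the hunk above does:
// the start is the current time truncated to the second, and the end is one
// configured period later.
func alignedPeriod(period time.Duration) (start, end time.Time) {
	now := time.Now()
	start = now.Truncate(time.Second)
	end = start.Add(period)
	return start, end
}

func main() {
	start, end := alignedPeriod(10 * time.Second)
	fmt.Println("aggregating from", start.Format(time.RFC3339), "to", end.Format(time.RFC3339))
}
```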
@@ -24,7 +24,7 @@ func TestAdd(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, time.Now(), make(chan struct{}))
go ra.Run(&acc, make(chan struct{}))

m := ra.MakeMetric(
"RITest",

@@ -55,7 +55,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) {
})
assert.NoError(t, ra.Config.Filter.Compile())
acc := testutil.Accumulator{}
go ra.Run(&acc, time.Now(), make(chan struct{}))
go ra.Run(&acc, make(chan struct{}))

// metric before current period
m := ra.MakeMetric(

@@ -113,7 +113,7 @@ func TestAddAndPushOnePeriod(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
ra.Run(&acc, time.Now(), shutdown)
ra.Run(&acc, shutdown)
}()

m := ra.MakeMetric(
@@ -1,6 +1,6 @@
# Bond Input Plugin

The Bond Input plugin collects bond interface status, bond's slaves interfaces
The Bond Input plugin collects network bond interface status, bond's slaves interfaces
status and failures count of bond's slaves interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.
@@ -325,7 +325,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) {
UID: sa.AccountID,
StandardClaims: jwt.StandardClaims{
// How long we have to login with this token
ExpiresAt: int64(5 * time.Minute / time.Second),
ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
},
})
return token.SignedString(sa.PrivateKey)
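The DC/OS hunk above replaces a value of 300 (five minutes expressed in seconds, i.e. a timestamp in 1970) with an absolute expiry, since the JWT `exp` claim must be a Unix timestamp. A hedged sketch of the same claim using github.com/dgrijalva/jwt-go (the package the diff's `jwt.StandardClaims` comes from), signed here with HS256 and a dummy secret purely for illustration:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// ExpiresAt must be an absolute Unix timestamp. Storing
	// int64(5*time.Minute/time.Second) == 300 is a date in 1970, so such a
	// token is already expired when it is presented.
	claims := jwt.StandardClaims{
		ExpiresAt: time.Now().Add(5 * time.Minute).Unix(),
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	signed, err := token.SignedString([]byte("illustrative-secret"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```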
@@ -46,7 +46,7 @@ func (ja *JolokiaAgent) SampleConfig() string {
# insecure_skip_verify = false

## Add metrics to read
[[inputs.jolokia2.metric]]
[[inputs.jolokia2_agent.metric]]
name = "java_runtime"
mbean = "java.lang:type=Runtime"
paths = ["Uptime"]
@@ -4,6 +4,8 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend.
### Configuration:

To use this plugin you must enable the [monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend.

```toml
[[inputs.openldap]]
host = "localhost"
@@ -13,6 +13,25 @@ For each of the active, hold, incoming, maildrop, and deferred queues (http://ww
# queue_directory = "/var/spool/postfix"
```

#### Permissions:

Telegraf will need read access to the files in the queue directory. You may
need to alter the permissions of these directories to provide access to the
telegraf user.

Unix permissions:
```sh
$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred}
$ sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred}
$ sudo usermod -a -G postdrop telegraf
$ sudo chmod g+r /var/spool/postfix/maildrop
```

Posix ACL:
```sh
$ sudo setfacl -Rdm u:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop}
```

### Measurements & Fields:

- postfix_queue
@@ -72,7 +72,7 @@ type MessageStats struct {
AckDetails Details `json:"ack_details"`
Deliver int64
DeliverDetails Details `json:"deliver_details"`
DeliverGet int64
DeliverGet int64 `json:"deliver_get"`
DeliverGetDetails Details `json:"deliver_get_details"`
Publish int64
PublishDetails Details `json:"publish_details"`
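The rabbitmq hunk works because `encoding/json` only matches field names case-insensitively; without an explicit tag, the key `deliver_get` never reaches `DeliverGet`, so the field stays zero. A small self-contained illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Untagged relies on encoding/json's default name matching, which ignores
// case but not underscores, so "deliver_get" never reaches DeliverGet.
type Untagged struct {
	DeliverGet int64
}

// Tagged maps the RabbitMQ key explicitly, as the hunk above does.
type Tagged struct {
	DeliverGet int64 `json:"deliver_get"`
}

func main() {
	payload := []byte(`{"deliver_get": 42}`)

	var u Untagged
	var t Tagged
	json.Unmarshal(payload, &u)
	json.Unmarshal(payload, &t)

	fmt.Println(u.DeliverGet, t.DeliverGet) // prints: 0 42
}
```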
@@ -88,6 +88,7 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a
**Replication**
- connected_slaves(int, number)
- master_repl_offset(int, number)
- second_repl_offset(int, number)
- repl_backlog_active(int, number)
- repl_backlog_size(int, bytes)
- repl_backlog_first_byte_offset(int, number)
@@ -189,6 +189,10 @@ func gatherInfoOutput(
}
}

if strings.HasPrefix(name, "master_replid") {
continue
}

if name == "mem_allocator" {
continue
}
@@ -86,6 +86,7 @@ func TestRedis_ParseMetrics(t *testing.T) {
"repl_backlog_size": int64(1048576),
"repl_backlog_first_byte_offset": int64(0),
"repl_backlog_histlen": int64(0),
"second_repl_offset": int64(-1),
"used_cpu_sys": float64(0.14),
"used_cpu_user": float64(0.05),
"used_cpu_sys_children": float64(0.00),

@@ -189,7 +190,10 @@ latest_fork_usec:0
# Replication
role:master
connected_slaves:0
master_replid:8c4d7b768b26826825ceb20ff4a2c7c54616350b
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:0
second_repl_offset:-1
repl_backlog_active:0
repl_backlog_size:1048576
repl_backlog_first_byte_offset:0
@@ -129,7 +129,7 @@ the configuration to execute that.
Example output from an _Apple SSD_:
```
> smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=/dev/rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000
> smart_attribute,device=/dev/rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000
> smart_device,enabled=Enabled,host=mbpro.local,device=/dev/rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
> smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000
> smart_attribute,device=rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000
> smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
```
@@ -3,6 +3,7 @@ package smart
import (
"fmt"
"os/exec"
"path"
"regexp"
"strconv"
"strings"

@@ -134,7 +135,7 @@ func (m *Smart) scan() ([]string, error) {

devices := []string{}
for _, line := range strings.Split(string(out), "\n") {
dev := strings.Split(line, "#")
dev := strings.Split(line, " ")
if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) {
devices = append(devices, strings.TrimSpace(dev[0]))
}

@@ -178,13 +179,13 @@ func exitStatus(err error) (int, error) {
return 0, err
}

func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockeck, device string, wg *sync.WaitGroup) {
func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, nockeck, device string, wg *sync.WaitGroup) {

defer wg.Done()
// smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n
args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"}
args = append(args, strings.Split(device, " ")...)
cmd := sudo(usesudo, path, args...)
cmd := sudo(usesudo, smartctl, args...)
out, e := internal.CombinedOutputTimeout(cmd, time.Second*5)
outStr := string(out)

@@ -196,7 +197,8 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockec
}

device_tags := map[string]string{}
device_tags["device"] = strings.Split(device, " ")[0]
device_node := strings.Split(device, " ")[0]
device_tags["device"] = path.Base(device_node)
device_fields := make(map[string]interface{})
device_fields["exit_status"] = exitStatus

@@ -240,7 +242,8 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockec
tags := map[string]string{}
fields := make(map[string]interface{})

tags["device"] = strings.Split(device, " ")[0]
device_node := strings.Split(device, " ")[0]
tags["device"] = path.Base(device_node)

if serial, ok := device_tags["serial_no"]; ok {
tags["serial_no"] = serial
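The smart hunks above derive the `device` tag from `path.Base` of the device node, so `/dev/rdisk0` and `/dev/ada0 -d atacam` both reduce to a short name. A sketch of that helper (a hypothetical function that mirrors the two lines in the diff):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// deviceTag reduces a smartctl device argument such as "/dev/ada0 -d atacam"
// to a short tag value, the way the hunk above does: take the node before
// any extra arguments, then strip the directory part.
func deviceTag(device string) string {
	node := strings.Split(device, " ")[0]
	return path.Base(node)
}

func main() {
	fmt.Println(deviceTag("/dev/rdisk0"))         // rdisk0
	fmt.Println(deviceTag("/dev/ada0 -d atacam")) // ada0
}
```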
@@ -89,7 +89,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "1",

@@ -107,7 +107,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "5",

@@ -125,7 +125,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "9",

@@ -143,7 +143,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "12",

@@ -161,7 +161,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "169",

@@ -179,7 +179,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "173",

@@ -197,7 +197,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "190",

@@ -215,7 +215,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "192",

@@ -233,7 +233,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "194",

@@ -251,7 +251,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "197",

@@ -269,7 +269,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "199",

@@ -287,7 +287,7 @@ func TestGatherAttributes(t *testing.T) {
"exit_status": int(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
"id": "240",

@@ -317,7 +317,7 @@ func TestGatherAttributes(t *testing.T) {
"udma_crc_errors": int64(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",

@@ -363,7 +363,7 @@ func TestGatherNoAttributes(t *testing.T) {
"udma_crc_errors": int64(0),
},
map[string]string{
"device": "/dev/ada0",
"device": "ada0",
"model": "APPLE SSD SM256E",
"serial_no": "S0X5NZBC422720",
"wwn": "5002538043584d30",
@@ -246,6 +246,9 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo
// Add core metrics section to accumulator
func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
var coreMetrics map[string]Core
if len(mBeansData.SolrMbeans) < 2 {
return fmt.Errorf("no core metric data to unmarshall")
}
if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil {
return err
}

@@ -274,9 +277,14 @@ func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBea
func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
var queryMetrics map[string]QueryHandler

if len(mBeansData.SolrMbeans) < 4 {
return fmt.Errorf("no query handler metric data to unmarshall")
}

if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil {
return err
}

for name, metrics := range queryMetrics {
coreFields := map[string]interface{}{
"15min_rate_reqs_per_second": metrics.Stats.One5minRateReqsPerSecond,

@@ -310,6 +318,9 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
var updateMetrics map[string]UpdateHandler

if len(mBeansData.SolrMbeans) < 6 {
return fmt.Errorf("no update handler metric data to unmarshall")
}
if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil {
return err
}

@@ -364,6 +375,9 @@ func getFloat(unk interface{}) float64 {

// Add cache metrics section to accumulator
func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
if len(mBeansData.SolrMbeans) < 8 {
return fmt.Errorf("no cache metric data to unmarshall")
}
var cacheMetrics map[string]Cache
if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil {
return err
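Each solr hunk above guards the `SolrMbeans` index before unmarshalling, which is what turns the index-out-of-range panic from #3604 into an ordinary error. A generic sketch of that guard over a `[]json.RawMessage` (the function name and error text are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeSection unmarshals mbeans[idx] only after checking the slice is long
// enough, mirroring the guards added in the hunks above; indexing an empty
// response directly is what caused the panic.
func decodeSection(mbeans []json.RawMessage, idx int, out interface{}) error {
	if len(mbeans) <= idx {
		return fmt.Errorf("no metric data to unmarshal at index %d", idx)
	}
	return json.Unmarshal(mbeans[idx], out)
}

func main() {
	var metrics map[string]interface{}

	empty := []json.RawMessage{}
	fmt.Println(decodeSection(empty, 1, &metrics)) // error instead of panic

	ok := []json.RawMessage{[]byte(`{}`), []byte(`{"core":{"stats":{}}}`)}
	fmt.Println(decodeSection(ok, 1, &metrics), metrics)
}
```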
@@ -60,3 +60,44 @@ func createMockServer() *httptest.Server {
}
}))
}

func TestNoCoreDataHandling(t *testing.T) {
ts := createMockNoCoreDataServer()
solr := NewSolr()
solr.Servers = []string{ts.URL}
var acc testutil.Accumulator
require.NoError(t, solr.Gather(&acc))

acc.AssertContainsTaggedFields(t, "solr_admin",
solrAdminMainCoreStatusExpected,
map[string]string{"core": "main"})

acc.AssertContainsTaggedFields(t, "solr_admin",
solrAdminCore1StatusExpected,
map[string]string{"core": "core1"})

acc.AssertDoesNotContainMeasurement(t, "solr_core")
acc.AssertDoesNotContainMeasurement(t, "solr_queryhandler")
acc.AssertDoesNotContainMeasurement(t, "solr_updatehandler")
acc.AssertDoesNotContainMeasurement(t, "solr_handler")

}

func createMockNoCoreDataServer() *httptest.Server {
var nodata string
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/solr/admin/cores") {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, statusResponse)
} else if strings.Contains(r.URL.Path, "solr/main/admin") {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, nodata)
} else if strings.Contains(r.URL.Path, "solr/core1/admin") {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, nodata)
} else {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintln(w, "nope")
}
}))
}
@@ -30,7 +30,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info
- zombie
- dead
- wait (freebsd only)
- idle (bsd only)
- idle (bsd and Linux 4+ only)
- paging (linux only)
- total_threads (linux only)

@@ -47,7 +47,7 @@ Linux FreeBSD Darwin meaning
Z Z Z zombie
X none none dead
T T T stopped
none I I idle (sleeping for longer than about 20 seconds)
I I I idle (sleeping for longer than about 20 seconds)
D D,L U blocked (waiting in uninterruptible sleep, or locked)
W W none paging (linux kernel < 2.6 only), wait (freebsd)
```
@@ -34,5 +34,6 @@ $ telegraf --config ~/ws/telegraf.conf --input-filter system --test
* Plugin: system, Collection 1
* Plugin: inputs.system, Collection 1
> system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000
> system,host=tyrion uptime=1249632i,uptime_format="14 days, 11:07" 1483964144000000000
> system,host=tyrion uptime=1249632i 1483964144000000000
> system,host=tyrion uptime_format="14 days, 11:07" 1483964144000000000
```
@@ -85,6 +85,7 @@ func getEmptyFields() map[string]interface{} {
fields["dead"] = int64(0)
fields["paging"] = int64(0)
fields["total_threads"] = int64(0)
fields["idle"] = int64(0)
}
return fields
}

@@ -174,6 +175,8 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error {
fields["stopped"] = fields["stopped"].(int64) + int64(1)
case 'W':
fields["paging"] = fields["paging"].(int64) + int64(1)
case 'I':
fields["idle"] = fields["idle"].(int64) + int64(1)
default:
log.Printf("I! processes: Unknown state [ %s ] in file %s",
string(stats[0][0]), filename)
@@ -44,7 +44,8 @@ func TestFromPS(t *testing.T) {
fields["zombies"] = int64(1)
fields["running"] = int64(4)
fields["sleeping"] = int64(34)
fields["total"] = int64(43)
fields["idle"] = int64(2)
fields["total"] = int64(45)

acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{})
}

@@ -172,6 +173,8 @@ U
Z
D
S+
I
I
`

const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0
@@ -46,7 +46,9 @@ func (_ *SystemStats) Gather(acc telegraf.Accumulator) error {
"n_cpus": runtime.NumCPU(),
}, nil)
acc.AddCounter("system", map[string]interface{}{
"uptime": hostinfo.Uptime,
"uptime": hostinfo.Uptime,
}, nil)
acc.AddFields("system", map[string]interface{}{
"uptime_format": format_uptime(hostinfo.Uptime),
}, nil)
@@ -284,7 +284,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid.
```


### .NET Montioring
### .NET Monitoring
```
[[inputs.win_perf_counters.object]]
# .NET CLR Exceptions, in this case for IIS only
@@ -9,7 +9,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/sts"

"github.com/influxdata/telegraf"
internalaws "github.com/influxdata/telegraf/internal/config/aws"

@@ -71,20 +70,7 @@ func (c *CloudWatch) Connect() error {
Token: c.Token,
}
configProvider := credentialConfig.Credentials()

stsService := sts.New(configProvider)

params := &sts.GetSessionTokenInput{}

_, err := stsService.GetSessionToken(params)

if err != nil {
log.Printf("E! cloudwatch: Cannot use credentials to connect to AWS : %+v \n", err.Error())
return err
}

c.svc = cloudwatch.New(configProvider)

return nil
}
@@ -19,6 +19,10 @@ func TestConnectAndWrite(t *testing.T) {
t.Skip("Skipping integration test in short mode")
}

if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
t.Skip("Skipping test on CircleCI due to docker failures")
}

url := testURL()
table := "test"

@@ -95,6 +99,10 @@ func Test_escapeValue(t *testing.T) {
t.Skip("Skipping integration test in short mode")
}

if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" {
t.Skip("Skipping test on CircleCI due to docker failures")
}

tests := []struct {
Val interface{}
Want string
@@ -155,8 +155,22 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
batch = append(batch, buf...)
}

err = g.send(batch)

// try to reconnect and retry to send
if err != nil {
log.Println("E! Graphite: Reconnecting and retrying: ")
g.Connect()
err = g.send(batch)
}

return err
}

func (g *Graphite) send(batch []byte) error {
// This will get set to nil if a successful write occurs
err = errors.New("Could not write to any Graphite server in cluster\n")
err := errors.New("Could not write to any Graphite server in cluster\n")

// Send data to a random server
p := rand.Perm(len(g.conns))
for _, n := range p {

@@ -167,6 +181,8 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
if _, e := g.conns[n].Write(batch); e != nil {
// Error
log.Println("E! Graphite Error: " + e.Error())
// Close explicitely
g.conns[n].Close()
// Let's try the next one
} else {
// Success

@@ -174,11 +190,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error {
break
}
}
// try to reconnect
if err != nil {
log.Println("E! Reconnecting: ")
g.Connect()
}

return err
}
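The graphite output hunks move reconnection out of `send` and into `Write`, which now retries the same batch exactly once after calling `Connect`. A stripped-down sketch of that retry-once shape, using a toy connection type rather than the plugin's real one:

```go
package main

import (
	"errors"
	"fmt"
)

type conn struct{ healthy bool }

func (c *conn) send(batch []byte) error {
	if !c.healthy {
		return errors.New("connection lost")
	}
	fmt.Printf("sent %d bytes\n", len(batch))
	return nil
}

func (c *conn) reconnect() { c.healthy = true }

// write mirrors the shape of the hunk above: try once, and on failure
// reconnect and retry the same batch exactly once before giving up.
func write(c *conn, batch []byte) error {
	err := c.send(batch)
	if err != nil {
		c.reconnect()
		err = c.send(batch)
	}
	return err
}

func main() {
	c := &conn{healthy: false}
	fmt.Println(write(c, []byte("cpu.load 0.42 1483964144\n")))
}
```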
@@ -81,7 +81,7 @@ func TestGraphiteOK(t *testing.T) {
err2 := g.Write(metrics)
require.NoError(t, err2)

// Waiting TCPserver
// Waiting TCPserver, should reconnect and resend
wg.Wait()
t.Log("Finished Waiting for first data")
var wg2 sync.WaitGroup

@@ -89,10 +89,8 @@ func TestGraphiteOK(t *testing.T) {
wg2.Add(1)
TCPServer2(t, &wg2)
//Write but expect an error, but reconnect
g.Write(metrics2)
err3 := g.Write(metrics2)
t.Log("Finished writing second data, it should have failed")
//Actually write the new metrics
t.Log("Finished writing second data, it should have reconnected automatically")

require.NoError(t, err3)
t.Log("Finished writing third data")
@@ -211,11 +211,12 @@ func (c *httpClient) makeRequest(uri string, body io.Reader) (*http.Request, err
return nil, err
}

req.Header.Set("Content-Type", "text/plain; charset=utf-8")

for header, value := range c.config.HTTPHeaders {
req.Header.Set(header, value)
}

req.Header.Set("Content-Type", "text/plain")
req.Header.Set("User-Agent", c.config.UserAgent)
if c.config.Username != "" && c.config.Password != "" {
req.SetBasicAuth(c.config.Username, c.config.Password)
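The influxdb hunk sets the default `Content-Type` (now with an explicit charset) before looping over user-configured headers, so a configured header can override it. A small net/http illustration; the URL and header values here are made up:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// newRequest sets the library default first and applies user headers after,
// so anything the user configures (including Content-Type) overrides the
// default -- the ordering point made by the hunk above.
func newRequest(userHeaders map[string]string) (*http.Request, error) {
	req, err := http.NewRequest("POST", "http://localhost:8086/write", strings.NewReader("cpu value=1"))
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "text/plain; charset=utf-8") // default
	for k, v := range userHeaders {
		req.Header.Set(k, v) // user-configured, may override the default
	}
	return req, nil
}

func main() {
	req, _ := newRequest(map[string]string{"Content-Type": "application/octet-stream"})
	fmt.Println(req.Header.Get("Content-Type"))
}
```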
@@ -4,6 +4,7 @@ import (
"fmt"
"strings"
"sync"
"time"

"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"

@@ -25,6 +26,9 @@ var sampleConfig = `
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"

## Timeout for write operations. default: 5s
# timeout = "5s"

## client ID, if not set a random ID is generated
# client_id = ""

@@ -149,7 +153,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error {

func (m *MQTT) publish(topic string, body []byte) error {
token := m.client.Publish(topic, byte(m.QoS), false, body)
token.Wait()
token.WaitTimeout(m.Timeout.Duration)
if token.Error() != nil {
return token.Error()
}

@@ -159,6 +163,11 @@ func (m *MQTT) publish(topic string, body []byte) error {
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
opts := paho.NewClientOptions()

if m.Timeout.Duration < time.Second {
m.Timeout.Duration = 5 * time.Second
}
opts.WriteTimeout = m.Timeout.Duration

if m.ClientID != "" {
opts.SetClientID(m.ClientID)
} else {
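The mqtt hunks bound the publish with `token.WaitTimeout` instead of an unbounded `token.Wait`, defaulting the timeout to 5s. A hedged sketch against github.com/eclipse/paho.mqtt.golang (the `paho` package in the diff); the broker address and topic here are illustrative:

```go
package main

import (
	"fmt"
	"time"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().AddBroker("tcp://localhost:1883") // illustrative broker
	opts.WriteTimeout = 5 * time.Second

	client := mqtt.NewClient(opts)
	if token := client.Connect(); !token.WaitTimeout(5*time.Second) || token.Error() != nil {
		fmt.Println("connect failed or timed out:", token.Error())
		return
	}

	// WaitTimeout returns false if the broker has not acknowledged the
	// publish within the deadline, so a stalled connection cannot block
	// the output's Write path forever (the point of the hunk above).
	token := client.Publish("telegraf/example", 1, false, []byte("cpu value=1"))
	if !token.WaitTimeout(5*time.Second) || token.Error() != nil {
		fmt.Println("publish failed or timed out:", token.Error())
	}
}
```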
@@ -79,19 +79,28 @@ var sampleConfig = `
`

func (p *PrometheusClient) Start() error {
prometheus.Register(p)

defaultCollectors := map[string]bool{
"gocollector": true,
"process": true,
}
for _, collector := range p.CollectorsExclude {
delete(defaultCollectors, collector)
}

registry := prometheus.NewRegistry()
for collector, _ := range defaultCollectors {
switch collector {
case "gocollector":
prometheus.Unregister(prometheus.NewGoCollector())
registry.Register(prometheus.NewGoCollector())
case "process":
prometheus.Unregister(prometheus.NewProcessCollector(os.Getpid(), ""))
registry.Register(prometheus.NewProcessCollector(os.Getpid(), ""))
default:
return fmt.Errorf("unrecognized collector %s", collector)
}
}

registry.Register(p)

if p.Listen == "" {
p.Listen = "localhost:9273"
}

@@ -102,8 +111,7 @@ func (p *PrometheusClient) Start() error {

mux := http.NewServeMux()
mux.Handle(p.Path, promhttp.HandlerFor(
prometheus.DefaultGatherer,
promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))
registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))

p.server = &http.Server{
Addr: p.Listen,
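The prometheus_client hunks register the output's collectors on a private `prometheus.NewRegistry()` and serve that registry via `promhttp.HandlerFor`, so two instances of the output stop sharing `prometheus.DefaultGatherer`. A minimal sketch of that wiring with an illustrative counter; the listen address mirrors the plugin's default:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Each instance gets its own registry, so two outputs no longer leak
	// metrics into each other through the default gatherer.
	registry := prometheus.NewRegistry()
	registry.MustRegister(prometheus.NewGoCollector())

	counter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_writes_total",
		Help: "Illustrative counter registered on the private registry.",
	})
	registry.MustRegister(counter)
	counter.Inc()

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(
		registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}))

	log.Fatal(http.ListenAndServe("localhost:9273", mux))
}
```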
@@ -11,6 +11,7 @@ import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"time"
)

type Wavefront struct {

@@ -120,6 +121,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error {
return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error())
}
defer connection.Close()
connection.SetWriteDeadline(time.Now().Add(5 * time.Second))

for _, m := range metrics {
for _, metricPoint := range buildMetrics(m, w) {
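The wavefront hunk adds a write deadline so a peer that stops reading makes the write fail with a timeout instead of blocking the output forever. A generic net.Conn sketch of the same idea; the address and payload are illustrative:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func writeWithDeadline(addr string, payload []byte) error {
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		return fmt.Errorf("TCP connect fail %s", err.Error())
	}
	defer conn.Close()

	// Without a deadline, a peer that stops reading stalls Write forever;
	// with one, the write returns a timeout error the caller can report.
	if err := conn.SetWriteDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return err
	}
	_, err = conn.Write(payload)
	return err
}

func main() {
	fmt.Println(writeWithDeadline("localhost:2878", []byte("example.metric 1 1483964144 source=host\n")))
}
```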
@@ -138,8 +138,11 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) {
// Trim the buffer, even though there should be no padding
line := strings.TrimSpace(string(buf))
metric, err := p.ParseLine(line)
if line == "" {
continue
}

metric, err := p.ParseLine(line)
if err == nil {
metrics = append(metrics, metric)
} else {
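The parser hunk skips lines that are empty after trimming before calling `ParseLine`, so trailing newlines in a Graphite plaintext payload no longer produce spurious errors. A sketch of that guard with a hypothetical per-line parser callback:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLines trims each line and skips blanks before handing it to a
// per-line parser, mirroring the guard added in the hunk above.
func parseLines(buf []byte, parseLine func(string) error) []error {
	var errs []error
	for _, raw := range strings.Split(string(buf), "\n") {
		line := strings.TrimSpace(raw)
		if line == "" {
			continue
		}
		if err := parseLine(line); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}

func main() {
	payload := []byte("cpu.load 0.42 1483964144\n\n   \nmem.used 1024 1483964144\n")
	errs := parseLines(payload, func(line string) error {
		fmt.Println("parsed:", line)
		return nil
	})
	fmt.Println("errors:", len(errs))
}
```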
@@ -133,7 +133,7 @@ func InsertField(bucket, fieldName string) string {
if fieldName == "value" {
return fieldDeleter.Replace(bucket)
}
return strings.Replace(bucket, "FIELDNAME", strings.Replace(fieldName, ".", "_", -1), 1)
return strings.Replace(bucket, "FIELDNAME", fieldName, 1)
}

func buildTags(tags map[string]string) string {