Compare commits

..

18 Commits
1.5.2 ... 1.5.3

Author SHA1 Message Date
Daniel Nelson
1e51969813 Set 1.5.3 release date
(cherry picked from commit 2160779126)
2018-03-14 16:34:56 -07:00
Daniel Nelson
51b097a7c6 Use previous image on appveyor
(cherry picked from commit f1b681cbdc)
2018-03-14 16:31:18 -07:00
Daniel Nelson
77dfb8c9c5 Update changelog
(cherry picked from commit 6e5e2f713d)
2018-03-14 14:25:53 -07:00
Jonas Hahnfeld
1398f8e678 Add output of stderr in case of error to exec log message (#3862)
If the command failed with a non-zero exit status there might be an error
message on stderr. Append the first line to the error message to ease the
search for its cause.

(cherry picked from commit 8e515688eb)
2018-03-14 14:25:53 -07:00
Daniel Nelson
d96483bffb Use Go 1.9.4 in builds 2018-03-09 14:37:53 -08:00
Daniel Nelson
5e534676a0 Update changelog
(cherry picked from commit f7207f514e)
2018-03-08 10:55:02 -08:00
Dennis Schön
9329200afa Fix uptime metric in passenger input plugin (#3871)
(cherry picked from commit f1c8abd68c)
2018-03-08 10:55:02 -08:00
Daniel Nelson
645b8b905d Update changelog
(cherry picked from commit e4ce057885)
2018-03-07 14:17:36 -08:00
dilshatm
ea7d884c09 Fix collation difference in sqlserver input (#3786)
(cherry picked from commit a6d366fb84)
2018-03-07 14:17:33 -08:00
Daniel Nelson
7f94cb58e4 Update changelog
(cherry picked from commit 5928219454)
2018-02-25 01:07:16 -08:00
Daniel Nelson
d8f2d4af0f Disable keepalive in mqtt output. (#3779)
This functionality currently has race conditions that can result in the
output deadlocking.

(cherry picked from commit 8c932abff6)
2018-02-25 01:07:16 -08:00
Daniel Nelson
d8dae1b1ab Fix memory leak in postgresql_extensible 2018-02-20 11:58:43 -08:00
Daniel Nelson
770cf4e0b6 Update changelog
(cherry picked from commit a00d5b48f8)
2018-02-09 12:13:49 -08:00
efficks
8cb5391f4e Fix ping plugin not reporting zero durations (#3778)
(cherry picked from commit f5ea13a9ab)
2018-02-09 12:13:48 -08:00
Daniel Nelson
c5ddb65ad9 Update changelog
(cherry picked from commit 9a1d69a2ae)
2018-02-05 11:23:03 -08:00
Philipp Weber
d671299e96 Remove userinfo from url tag in prometheus input (#3743)
(cherry picked from commit b7a68eef56)
2018-02-05 11:23:02 -08:00
Daniel Nelson
f59231941f Update changelog
(cherry picked from commit 32732d42f8)
2018-01-30 18:09:24 -08:00
Daniel Nelson
100bdfba6c Set path to / if HOST_MOUNT_PREFIX matches full path (#3736)
(cherry picked from commit 10e51e4b49)
2018-01-30 18:08:50 -08:00
17 changed files with 258 additions and 70 deletions

View File

@@ -1,3 +1,16 @@
## v1.5.3 [2018-03-14]
### Bugfixes
- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
- [#3807](https://github.com/influxdata/telegraf/issues/3807): Fix memory leak in postgresql_extensible.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.
## v1.5.2 [2018-01-30]
### Bugfixes

View File

@@ -1,3 +1,4 @@
image: Previous Visual Studio 2015
version: "{build}"
cache:
@@ -12,11 +13,11 @@ platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.9.2.msi" curl -o "C:\Cache\go1.9.2.msi" https://storage.googleapis.com/golang/go1.9.2.windows-amd64.msi
- IF NOT EXIST "C:\Cache\go1.9.4.msi" curl -o "C:\Cache\go1.9.4.msi" https://storage.googleapis.com/golang/go1.9.4.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.9.2.msi" /quiet
- msiexec.exe /i "C:\Cache\go1.9.4.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version

View File

@@ -6,8 +6,8 @@ machine:
- rabbitmq-server
post:
- sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.2.linux-amd64.tar.gz
- wget https://storage.googleapis.com/golang/go1.9.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.9.4.linux-amd64.tar.gz
- go version
dependencies:

View File

@@ -41,6 +41,8 @@ const sampleConfig = `
data_format = "influx"
`
const MaxStderrBytes = 512
type Exec struct {
Commands []string
Command string
@@ -96,15 +98,41 @@ func (c CommandRunner) Run(
cmd := exec.Command(split_cmd[0], split_cmd[1:]...)
var out bytes.Buffer
var (
out bytes.Buffer
stderr bytes.Buffer
)
cmd.Stdout = &out
cmd.Stderr = &stderr
if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil {
switch e.parser.(type) {
case *nagios.NagiosParser:
AddNagiosState(err, acc)
default:
return nil, fmt.Errorf("exec: %s for command '%s'", err, command)
var errMessage = ""
if stderr.Len() > 0 {
stderr = removeCarriageReturns(stderr)
// Limit the number of bytes.
didTruncate := false
if stderr.Len() > MaxStderrBytes {
stderr.Truncate(MaxStderrBytes)
didTruncate = true
}
if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 {
// Only show truncation if the newline wasn't the last character.
if i < stderr.Len()-1 {
didTruncate = true
}
stderr.Truncate(i)
}
if didTruncate {
stderr.WriteString("...")
}
errMessage = fmt.Sprintf(": %s", stderr.String())
}
return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage)
}
} else {
switch e.parser.(type) {

View File

@@ -102,7 +102,7 @@ func (p *process) getUptime() int64 {
uptime += value * (24 * 60 * 60)
}
case strings.HasSuffix(v, "h"):
iValue := strings.TrimSuffix(v, "y")
iValue := strings.TrimSuffix(v, "h")
value, err := strconv.ParseInt(iValue, 10, 64)
if err == nil {
uptime += value * (60 * 60)

View File

@@ -126,7 +126,7 @@ func TestPassengerGenerateMetric(t *testing.T) {
"spawn_start_time": int64(1452746844946982),
"spawn_end_time": int64(1452746845013365),
"last_used": int64(1452747071764940),
"uptime": int64(226), // in seconds of 3m 46s
"uptime": int64(191026), // in seconds of 2d 5h 3m 46s
"cpu": int64(58),
"rss": int64(418548),
"pss": int64(319391),
@@ -219,7 +219,7 @@ var sampleStat = `
<spawn_end_time>1452746845013365</spawn_end_time>
<last_used>1452747071764940</last_used>
<last_used_desc>0s ago</last_used_desc>
<uptime>3m 46s</uptime>
<uptime>2d 5h 3m 46s</uptime>
<code_revision>899ac7f</code_revision>
<life_status>ALIVE</life_status>
<enabled>ENABLED</enabled>
@@ -263,7 +263,7 @@ var sampleStat = `
<spawn_end_time>1452746845172460</spawn_end_time>
<last_used>1452747071709179</last_used>
<last_used_desc>0s ago</last_used_desc>
<uptime>3m 46s</uptime>
<uptime>2d 5h 3m 46s</uptime>
<code_revision>899ac7f</code_revision>
<life_status>ALIVE</life_status>
<enabled>ENABLED</enabled>

View File

@@ -128,16 +128,16 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
fields["packets_transmitted"] = trans
fields["packets_received"] = rec
fields["percent_packet_loss"] = loss
if min > 0 {
if min >= 0 {
fields["minimum_response_ms"] = min
}
if avg > 0 {
if avg >= 0 {
fields["average_response_ms"] = avg
}
if max > 0 {
if max >= 0 {
fields["maximum_response_ms"] = max
}
if stddev > 0 {
if stddev >= 0 {
fields["standard_deviation_ms"] = stddev
}
acc.AddFields("ping", fields, tags)
@@ -198,7 +198,7 @@ func (p *Ping) args(url string) []string {
// It returns (<transmitted packets>, <received packets>, <average response>)
func processPingOutput(out string) (int, int, float64, float64, float64, float64, error) {
var trans, recv int
var min, avg, max, stddev float64
var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0
// Set this error to nil if we find a 'transmitted' line
err := errors.New("Fatal error processing ping output")
lines := strings.Split(out, "\n")

View File

@@ -93,32 +93,32 @@ func processPingOutput(out string) (int, int, int, int, int, int, error) {
// stats data should contain 4 members: entireExpression + ( Send, Receive, Lost )
if len(stats) != 4 {
return 0, 0, 0, 0, 0, 0, err
return 0, 0, 0, -1, -1, -1, err
}
trans, err := strconv.Atoi(stats[1])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
return 0, 0, 0, -1, -1, -1, err
}
receivedPacket, err := strconv.Atoi(stats[2])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
return 0, 0, 0, -1, -1, -1, err
}
// aproxs data should contain 4 members: entireExpression + ( min, max, avg )
if len(aproxs) != 4 {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
return trans, receivedReply, receivedPacket, -1, -1, -1, err
}
min, err := strconv.Atoi(aproxs[1])
if err != nil {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
return trans, receivedReply, receivedPacket, -1, -1, -1, err
}
max, err := strconv.Atoi(aproxs[2])
if err != nil {
return trans, receivedReply, receivedPacket, 0, 0, 0, err
return trans, receivedReply, receivedPacket, -1, -1, -1, err
}
avg, err := strconv.Atoi(aproxs[3])
if err != nil {
return 0, 0, 0, 0, 0, 0, err
return 0, 0, 0, -1, -1, -1, err
}
return trans, receivedReply, receivedPacket, avg, min, max, err
@@ -201,13 +201,13 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
fields["packets_received"] = receivePacket
fields["percent_packet_loss"] = lossPackets
fields["percent_reply_loss"] = lossReply
if avg > 0 {
if avg >= 0 {
fields["average_response_ms"] = float64(avg)
}
if min > 0 {
if min >= 0 {
fields["minimum_response_ms"] = float64(min)
}
if max > 0 {
if max >= 0 {
fields["maximum_response_ms"] = float64(max)
}
acc.AddFields("ping", fields, tags)

View File

@@ -127,6 +127,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error {
meas_name string
)
p.AllColumns = nil
if p.Address == "" || p.Address == "localhost" {
p.Address = localhost
}

View File

@@ -67,7 +67,7 @@ Measurement names are based on the Metric Family and tags are created for each
label. The value is added to a field named based on the metric type.
All metrics receive the `url` tag indicating the related URL specified in the
Telegraf configuration. If using Kubernetes service discovery the `address`
Telegraf configuration. If using Kubernetes service discovery the `address`
tag is also added indicating the discovered ip address.
### Example Output:

View File

@@ -20,7 +20,7 @@ const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client
type Prometheus struct {
// An array of urls to scrape metrics from.
Urls []string
URLs []string `toml:"urls"`
// An array of Kubernetes services to scrape metrics from.
KubernetesServices []string
@@ -73,12 +73,12 @@ func (p *Prometheus) Description() string {
var ErrProtocolError = errors.New("prometheus protocol error")
func (p *Prometheus) AddressToURL(u *url.URL, address string) string {
func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL {
host := address
if u.Port() != "" {
host = address + ":" + u.Port()
}
reconstructedUrl := url.URL{
reconstructedURL := &url.URL{
Scheme: u.Scheme,
Opaque: u.Opaque,
User: u.User,
@@ -89,36 +89,42 @@ func (p *Prometheus) AddressToURL(u *url.URL, address string) string {
Fragment: u.Fragment,
Host: host,
}
return reconstructedUrl.String()
return reconstructedURL
}
type UrlAndAddress struct {
OriginalUrl string
Url string
type URLAndAddress struct {
OriginalURL *url.URL
URL *url.URL
Address string
}
func (p *Prometheus) GetAllURLs() ([]UrlAndAddress, error) {
allUrls := make([]UrlAndAddress, 0)
for _, url := range p.Urls {
allUrls = append(allUrls, UrlAndAddress{Url: url, OriginalUrl: url})
func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) {
allURLs := make([]URLAndAddress, 0)
for _, u := range p.URLs {
URL, err := url.Parse(u)
if err != nil {
log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err)
continue
}
allURLs = append(allURLs, URLAndAddress{URL: URL, OriginalURL: URL})
}
for _, service := range p.KubernetesServices {
u, err := url.Parse(service)
URL, err := url.Parse(service)
if err != nil {
return nil, err
}
resolvedAddresses, err := net.LookupHost(u.Hostname())
resolvedAddresses, err := net.LookupHost(URL.Hostname())
if err != nil {
log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", u.Host, err)
log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err)
continue
}
for _, resolved := range resolvedAddresses {
serviceUrl := p.AddressToURL(u, resolved)
allUrls = append(allUrls, UrlAndAddress{Url: serviceUrl, Address: resolved, OriginalUrl: service})
serviceURL := p.AddressToURL(URL, resolved)
allURLs = append(allURLs, URLAndAddress{URL: serviceURL, Address: resolved, OriginalURL: URL})
}
}
return allUrls, nil
return allURLs, nil
}
// Reads stats from all configured servers accumulates stats.
@@ -134,16 +140,16 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error {
var wg sync.WaitGroup
allUrls, err := p.GetAllURLs()
allURLs, err := p.GetAllURLs()
if err != nil {
return err
}
for _, url := range allUrls {
for _, URL := range allURLs {
wg.Add(1)
go func(serviceUrl UrlAndAddress) {
go func(serviceURL URLAndAddress) {
defer wg.Done()
acc.AddError(p.gatherURL(serviceUrl, acc))
}(url)
acc.AddError(p.gatherURL(serviceURL, acc))
}(URL)
}
wg.Wait()
@@ -178,8 +184,8 @@ func (p *Prometheus) createHttpClient() (*http.Client, error) {
return client, nil
}
func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) error {
var req, err = http.NewRequest("GET", url.Url, nil)
func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error {
var req, err = http.NewRequest("GET", u.URL.String(), nil)
req.Header.Add("Accept", acceptHeader)
var token []byte
var resp *http.Response
@@ -194,11 +200,11 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) erro
resp, err = p.client.Do(req)
if err != nil {
return fmt.Errorf("error making HTTP request to %s: %s", url.Url, err)
return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s returned HTTP status %s", url.Url, resp.Status)
return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
@@ -209,14 +215,16 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) erro
metrics, err := Parse(body, resp.Header)
if err != nil {
return fmt.Errorf("error reading metrics for %s: %s",
url.Url, err)
u.URL, err)
}
// Add (or not) collected metrics
for _, metric := range metrics {
tags := metric.Tags()
tags["url"] = url.OriginalUrl
if url.Address != "" {
tags["address"] = url.Address
// strip user and password from URL
u.OriginalURL.User = nil
tags["url"] = u.OriginalURL.String()
if u.Address != "" {
tags["address"] = u.Address
}
switch metric.Type() {

View File

@@ -37,7 +37,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) {
defer ts.Close()
p := &Prometheus{
Urls: []string{ts.URL},
URLs: []string{ts.URL},
}
var acc testutil.Accumulator
@@ -89,7 +89,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) {
defer ts.Close()
p := &Prometheus{
Urls: []string{ts.URL},
URLs: []string{ts.URL},
KubernetesServices: []string{"http://random.telegraf.local:88/metrics"},
}

View File

@@ -1116,30 +1116,30 @@ DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetwee
DECLARE @w1 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL,
CollectionDate datetime NOT NULL
)
DECLARE @w2 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL,
CollectionDate datetime NOT NULL
)
DECLARE @w3 TABLE
(
WaitType nvarchar(64) NOT NULL
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
)
DECLARE @w4 TABLE
(
WaitType nvarchar(64) NOT NULL,
WaitCategory nvarchar(64) NOT NULL
WaitType nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL
)
DECLARE @w5 TABLE
(
WaitCategory nvarchar(64) NOT NULL,
WaitCategory nvarchar(64) collate SQL_Latin1_General_CP1_CI_AS NOT NULL,
WaitTimeInMs bigint NOT NULL,
WaitTaskCount bigint NOT NULL
)
@@ -1380,12 +1380,12 @@ INSERT @w4 (WaitType, WaitCategory) VALUES ('ABR', 'OTHER') ,
INSERT @w1 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
SELECT
WaitType = wait_type
WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
, WaitTimeInMs = SUM(wait_time_ms)
, WaitTaskCount = SUM(waiting_tasks_count)
, CollectionDate = GETDATE()
FROM sys.dm_os_wait_stats
WHERE [wait_type] NOT IN
WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
(
SELECT WaitType FROM @w3
)
@@ -1396,12 +1396,12 @@ WAITFOR DELAY @delayInterval;
INSERT @w2 (WaitType, WaitTimeInMs, WaitTaskCount, CollectionDate)
SELECT
WaitType = wait_type
WaitType = wait_type collate SQL_Latin1_General_CP1_CI_AS
, WaitTimeInMs = SUM(wait_time_ms)
, WaitTaskCount = SUM(waiting_tasks_count)
, CollectionDate = GETDATE()
FROM sys.dm_os_wait_stats
WHERE [wait_type] NOT IN
WHERE [wait_type] collate SQL_Latin1_General_CP1_CI_AS NOT IN
(
SELECT WaitType FROM @w3
)

View File

@@ -117,6 +117,140 @@ func TestDiskUsage(t *testing.T) {
assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields())
}
// TestDiskUsageHostMountPrefix verifies that gathering disk stats rewrites the
// "path" tag according to the HOST_MOUNT_PREFIX environment variable: the
// prefix is stripped from the mountpoint, and — the case this change fixes —
// a mountpoint that exactly equals the prefix must yield "/" rather than an
// empty path (see #3736 in the commit log above).
func TestDiskUsageHostMountPrefix(t *testing.T) {
// Table-driven cases: each entry supplies the mocked partition and usage
// data, the HOST_MOUNT_PREFIX value to simulate, and the tags/fields
// expected on the resulting "disk" measurement.
tests := []struct {
name string
partitionStats []disk.PartitionStat
usageStats []*disk.UsageStat
hostMountPrefix string
expectedTags map[string]string
expectedFields map[string]interface{}
}{
{
// No prefix configured: the mountpoint is reported as-is.
name: "no host mount prefix",
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/",
Fstype: "ext4",
Opts: "ro",
},
},
usageStats: []*disk.UsageStat{
&disk.UsageStat{
Path: "/",
Total: 42,
},
},
expectedTags: map[string]string{
"path": "/",
"device": "sda",
"fstype": "ext4",
"mode": "ro",
},
// Only Total is set on the mock UsageStat, so every other
// field reports its zero value.
expectedFields: map[string]interface{}{
"total": uint64(42),
"used": uint64(0),
"free": uint64(0),
"inodes_total": uint64(0),
"inodes_free": uint64(0),
"inodes_used": uint64(0),
"used_percent": float64(0),
},
},
{
// Prefix is a proper prefix of the mountpoint: it is stripped,
// leaving the host-relative path "/var".
name: "host mount prefix",
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/hostfs/var",
Fstype: "ext4",
Opts: "ro",
},
},
usageStats: []*disk.UsageStat{
&disk.UsageStat{
Path: "/hostfs/var",
Total: 42,
},
},
hostMountPrefix: "/hostfs",
expectedTags: map[string]string{
"path": "/var",
"device": "sda",
"fstype": "ext4",
"mode": "ro",
},
expectedFields: map[string]interface{}{
"total": uint64(42),
"used": uint64(0),
"free": uint64(0),
"inodes_total": uint64(0),
"inodes_free": uint64(0),
"inodes_used": uint64(0),
"used_percent": float64(0),
},
},
{
// Prefix equals the entire mountpoint: stripping it must produce
// "/" instead of "" — the regression this diff guards against.
name: "host mount prefix exact match",
partitionStats: []disk.PartitionStat{
{
Device: "/dev/sda",
Mountpoint: "/hostfs",
Fstype: "ext4",
Opts: "ro",
},
},
usageStats: []*disk.UsageStat{
&disk.UsageStat{
Path: "/hostfs",
Total: 42,
},
},
hostMountPrefix: "/hostfs",
expectedTags: map[string]string{
"path": "/",
"device": "sda",
"fstype": "ext4",
"mode": "ro",
},
expectedFields: map[string]interface{}{
"total": uint64(42),
"used": uint64(0),
"free": uint64(0),
"inodes_total": uint64(0),
"inodes_free": uint64(0),
"inodes_used": uint64(0),
"used_percent": float64(0),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Mock the PS layer so partition enumeration, per-path disk
// usage, and the HOST_MOUNT_PREFIX env lookup all return the
// case's canned data instead of touching the real system.
mck := &mock.Mock{}
mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck}
// Fail the subtest if any registered expectation goes unmet.
defer mps.AssertExpectations(t)
var acc testutil.Accumulator
var err error
mps.On("Partitions", true).Return(tt.partitionStats, nil)
for _, v := range tt.usageStats {
mps.On("PSDiskUsage", v.Path).Return(v, nil)
}
mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return(tt.hostMountPrefix)
err = (&DiskStats{ps: mps}).Gather(&acc)
require.NoError(t, err)
// The accumulator must contain exactly the expected tag set and
// field values for the "disk" measurement.
acc.AssertContainsTaggedFields(t, "disk", tt.expectedFields, tt.expectedTags)
})
}
}
func TestDiskStats(t *testing.T) {
var mps MockPS
defer mps.AssertExpectations(t)

View File

@@ -2,6 +2,7 @@ package system
import (
"os"
"path/filepath"
"strings"
"github.com/influxdata/telegraf"
@@ -129,7 +130,7 @@ func (s *systemPS) DiskUsage(
continue
}
du.Path = strings.TrimPrefix(p.Mountpoint, hostMountPrefix)
du.Path = filepath.Join("/", strings.TrimPrefix(p.Mountpoint, hostMountPrefix))
du.Fstype = p.Fstype
usage = append(usage, du)
partitions = append(partitions, &p)

View File

@@ -162,6 +162,7 @@ func (m *MQTT) publish(topic string, body []byte) error {
func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
opts := paho.NewClientOptions()
opts.KeepAlive = 0 * time.Second
if m.Timeout.Duration < time.Second {
m.Timeout.Duration = 5 * time.Second

View File

@@ -631,7 +631,7 @@ func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error)
time.Sleep(time.Millisecond * 200)
p := &prometheus_input.Prometheus{
Urls: []string{"http://localhost:9127/metrics"},
URLs: []string{"http://localhost:9127/metrics"},
}
return pTesting, p, nil