Compare commits
10 Commits
0.10.3-win ... 0.10.4.1
| Author | SHA1 | Date |
|---|---|---|
| | fc4cb1654c | |
| | f1fa915985 | |
| | 11482a75a1 | |
| | e983d35c25 | |
| | 85c4f753ad | |
| | 1847ce3f3d | |
| | 83c27cc7b1 | |
| | 3e8f96a463 | |
| | 69e4f16b13 | |
| | 918c3fb260 | |
CHANGELOG.md (19 changes)
```diff
@@ -1,13 +1,30 @@
-## v0.10.4 [unreleased]
+## v0.10.5 [unreleased]
 
 ### Release Notes
 
 ### Features
 
 ### Bugfixes
 
+## v0.10.4 [2016-02-24]
+
+### Release Notes
+- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters,
+to more accurately indicate their purpose.
+- There are also now namedrop/namepass parameters for passing/dropping based
+on the metric _name_.
+- Experimental windows builds now available.
+
+### Features
+- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
+- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
+- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
+- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
+- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!
+
+### Bugfixes
+- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
+- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.
+
 ## v0.10.3 [2016-02-18]
```
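The renamed filter parameters from the release notes above are set per input plugin. A minimal sketch of how they combine, assuming a 0.10.4-era config; the plugin and patterns here are illustrative, not taken from this diff:

```toml
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  ## was "drop" before 0.10.4: remove the raw CPU time fields
  fielddrop = ["time_*"]
  ## new in 0.10.4: filter on the metric name itself
  namepass = ["cpu*"]
```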
```diff
@@ -1,4 +1,4 @@
-git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git dbd8d5c40a582eb9adacde36b47932b3a3ad0034
+git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
 github.com/Shopify/sarama d37c73f2b2bce85f7fa16b6a550d26c5372892ef
 github.com/Sirupsen/logrus f7f79f729e0fbe2fcc061db48a9ba0263f588252
 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
@@ -21,18 +21,18 @@ github.com/gorilla/context 1c83b3eabd45b6d76072b66b746c20815fb2872d
 github.com/gorilla/mux 26a6070f849969ba72b72256e9f14cf519751690
 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
 github.com/influxdata/config bae7cb98197d842374d3b8403905924094930f24
-github.com/influxdata/influxdb a9552fdd91361819a792f337e5d9998859732a67
-github.com/influxdb/influxdb a9552fdd91361819a792f337e5d9998859732a67
+github.com/influxdata/influxdb ef571fc104dc24b77cd3710c156cd95e5cfd7aa5
 github.com/jmespath/go-jmespath c01cf91b011868172fdcd9f41838e80c9d716264
 github.com/klauspost/crc32 999f3125931f6557b991b2f8472172bdfa578d38
 github.com/lib/pq 8ad2b298cadd691a77015666a5372eae5dbfac8f
 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
 github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
 github.com/miekg/dns e0d84d97e59bcb6561eae269c4e94d25b66822cb
 github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
 github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
 github.com/naoina/toml 751171607256bb66e64c9f0220c00662420c38e9
 github.com/nats-io/nats 6a83f1a633cfbfd90aa648ac99fb38c06a8b40df
 github.com/nsqio/go-nsq 2118015c120962edc5d03325c680daf3163a8b5f
 github.com/pborman/uuid dee7705ef7b324f27ceb85a121c61f2c2e8ce988
 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
 github.com/prometheus/client_golang 67994f177195311c3ea3d4407ed0175e34a4256f
 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
```
README.md (24 changes)
````diff
@@ -27,12 +27,12 @@ the [release blog post](https://influxdata.com/blog/announcing-telegraf-0-10-0/)
 ### Linux deb and rpm Packages:
 
 Latest:
-* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_amd64.deb
-* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.x86_64.rpm
+* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_amd64.deb
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.x86_64.rpm
 
 Latest (arm):
-* http://get.influxdb.org/telegraf/telegraf_0.10.3-1_arm.deb
-* http://get.influxdb.org/telegraf/telegraf-0.10.3-1.arm.rpm
+* http://get.influxdb.org/telegraf/telegraf_0.10.4-1_arm.deb
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1.arm.rpm
 
 0.2.x:
 * http://get.influxdb.org/telegraf/telegraf_0.2.4_amd64.deb
@@ -56,9 +56,9 @@ for instructions, replacing the `influxdb` package name with `telegraf`.
 ### Linux tarballs:
 
 Latest:
-* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_amd64.tar.gz
-* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_i386.tar.gz
-* http://get.influxdb.org/telegraf/telegraf-0.10.3-1_linux_arm.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_amd64.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_i386.tar.gz
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_linux_arm.tar.gz
 
 0.2.x:
 * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.2.4.tar.gz
@@ -70,13 +70,13 @@ Latest:
 To install the full directory structure with config file, run:
 
 ```
-sudo tar -C / -zxvf ./telegraf-0.10.3-1_linux_amd64.tar.gz
+sudo tar -C / -zxvf ./telegraf-0.10.4-1_linux_amd64.tar.gz
 ```
 
 To extract only the binary, run:
 
 ```
-tar -zxvf telegraf-0.10.3-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
+tar -zxvf telegraf-0.10.4-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
 ```
 
 ### Ansible Role:
@@ -90,6 +90,12 @@ brew update
 brew install telegraf
 ```
 
+### Windows Binaries (EXPERIMENTAL)
+
+Latest:
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_amd64.zip
+* http://get.influxdb.org/telegraf/telegraf-0.10.4-1_windows_i386.zip
+
 ### From Source:
 
 Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
````
etc/telegraf_windows.conf (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
# Telegraf configuration
|
||||
|
||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
||||
# declared inputs, and sent to the declared outputs.
|
||||
|
||||
# Plugins must be declared in here to be active.
|
||||
# To deactivate a plugin, comment out the name and any variables.
|
||||
|
||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
||||
# file would generate.
|
||||
|
||||
# Global tags can be specified here in key="value" format.
|
||||
[global_tags]
|
||||
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
|
||||
# rack = "1a"
|
||||
|
||||
# Configuration for telegraf agent
|
||||
[agent]
|
||||
## Default data collection interval for all inputs
|
||||
interval = "10s"
|
||||
## Rounds collection interval to 'interval'
|
||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
||||
round_interval = true
|
||||
|
||||
## Telegraf will cache metric_buffer_limit metrics for each output, and will
|
||||
## flush this buffer on a successful write.
|
||||
metric_buffer_limit = 10000
|
||||
## Flush the buffer whenever full, regardless of flush_interval.
|
||||
flush_buffer_when_full = true
|
||||
|
||||
## Collection jitter is used to jitter the collection by a random amount.
|
||||
## Each plugin will sleep for a random time within jitter before collecting.
|
||||
## This can be used to avoid many plugins querying things like sysfs at the
|
||||
## same time, which can have a measurable effect on the system.
|
||||
collection_jitter = "0s"
|
||||
|
||||
## Default flushing interval for all outputs. You shouldn't set this below
|
||||
## interval. Maximum flush_interval will be flush_interval + flush_jitter
|
||||
flush_interval = "10s"
|
||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
||||
## large write spikes for users running a large number of telegraf instances.
|
||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
||||
flush_jitter = "0s"
|
||||
|
||||
## Run telegraf in debug mode
|
||||
debug = false
|
||||
## Run telegraf in quiet mode
|
||||
quiet = false
|
||||
## Override default hostname, if empty use os.Hostname()
|
||||
hostname = ""
|
||||
|
||||
|
||||
###############################################################################
|
||||
# OUTPUTS #
|
||||
###############################################################################
|
||||
|
||||
# Configuration for influxdb server to send metrics to
|
||||
[[outputs.influxdb]]
|
||||
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
|
||||
# Multiple urls can be specified but it is assumed that they are part of the same
|
||||
# cluster, this means that only ONE of the urls will be written to each interval.
|
||||
# urls = ["udp://localhost:8089"] # UDP endpoint example
|
||||
urls = ["http://localhost:8086"] # required
|
||||
# The target database for metrics (telegraf will create it if not exists)
|
||||
database = "telegraf" # required
|
||||
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
# note: using second precision greatly helps InfluxDB compression
|
||||
precision = "s"
|
||||
|
||||
## Write timeout (for the InfluxDB client), formatted as a string.
|
||||
## If not provided, will default to 5s. 0s means no timeout (not recommended).
|
||||
timeout = "5s"
|
||||
# username = "telegraf"
|
||||
# password = "metricsmetricsmetricsmetrics"
|
||||
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
|
||||
# user_agent = "telegraf"
|
||||
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
|
||||
# udp_payload = 512
|
||||
|
||||
|
||||
###############################################################################
|
||||
# INPUTS #
|
||||
###############################################################################
|
||||
|
||||
# Windows Performance Counters plugin.
|
||||
# These are the recommended method of monitoring system metrics on windows,
|
||||
# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI,
|
||||
# which utilizes a lot of system resources.
|
||||
#
|
||||
# See more configuration examples at:
|
||||
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters
|
||||
|
||||
[[inputs.win_perf_counters]]
|
||||
[[inputs.win_perf_counters.object]]
|
||||
# Processor usage, alternative to native, reports on a per core.
|
||||
ObjectName = "Processor"
|
||||
Instances = ["*"]
|
||||
Counters = ["% Idle Time", "% Interrupt Time", "% Privileged Time", "% User Time", "% Processor Time"]
|
||||
Measurement = "win_cpu"
|
||||
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
|
||||
|
||||
[[inputs.win_perf_counters.object]]
|
||||
# Disk times and queues
|
||||
ObjectName = "LogicalDisk"
|
||||
Instances = ["*"]
|
||||
Counters = ["% Idle Time", "% Disk Time","% Disk Read Time", "% Disk Write Time", "% User Time", "Current Disk Queue Length"]
|
||||
Measurement = "win_disk"
|
||||
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
|
||||
|
||||
[[inputs.win_perf_counters.object]]
|
||||
ObjectName = "System"
|
||||
Counters = ["Context Switches/sec","System Calls/sec"]
|
||||
Instances = ["------"]
|
||||
Measurement = "win_system"
|
||||
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
|
||||
|
||||
[[inputs.win_perf_counters.object]]
|
||||
# Example query where the Instance portion must be removed to get data back, such as from the Memory object.
|
||||
ObjectName = "Memory"
|
||||
Counters = ["Available Bytes","Cache Faults/sec","Demand Zero Faults/sec","Page Faults/sec","Pages/sec","Transition Faults/sec","Pool Nonpaged Bytes","Pool Paged Bytes"]
|
||||
Instances = ["------"] # Use 6 x - to remove the Instance bit from the query.
|
||||
Measurement = "win_mem"
|
||||
#IncludeTotal=false #Set to true to include _Total instance when querying for all (*).
|
||||
|
||||
|
||||
# Windows system plugins using WMI (disabled by default, using
|
||||
# win_perf_counters over WMI is recommended)
|
||||
|
||||
# Read metrics about cpu usage
|
||||
#[[inputs.cpu]]
|
||||
## Whether to report per-cpu stats or not
|
||||
#percpu = true
|
||||
## Whether to report total system cpu stats or not
|
||||
#totalcpu = true
|
||||
## Comment this line if you want the raw CPU time metrics
|
||||
#fielddrop = ["time_*"]
|
||||
|
||||
# Read metrics about disk usage by mount point
|
||||
#[[inputs.disk]]
|
||||
## By default, telegraf gather stats for all mountpoints.
|
||||
## Setting mountpoints will restrict the stats to the specified mountpoints.
|
||||
## mount_points=["/"]
|
||||
|
||||
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
|
||||
## present on /run, /var/run, /dev/shm or /dev).
|
||||
#ignore_fs = ["tmpfs", "devtmpfs"]
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
#[[inputs.diskio]]
|
||||
## By default, telegraf will gather stats for all devices including
|
||||
## disk partitions.
|
||||
## Setting devices will restrict the stats to the specified devices.
|
||||
## devices = ["sda", "sdb"]
|
||||
## Uncomment the following line if you do not need disk serial numbers.
|
||||
## skip_serial_number = true
|
||||
|
||||
# Read metrics about memory usage
|
||||
#[[inputs.mem]]
|
||||
# no configuration
|
||||
|
||||
# Read metrics about swap memory usage
|
||||
#[[inputs.swap]]
|
||||
# no configuration
|
||||
|
||||
```diff
@@ -9,7 +9,7 @@ import (
 )
 
 var servers = []string{"8.8.8.8"}
-var domains = []string{"mjasion.pl"}
+var domains = []string{"google.com"}
 
 func TestGathering(t *testing.T) {
     var dnsConfig = DnsQuery{
@@ -18,8 +18,10 @@ func TestGathering(t *testing.T) {
     }
     var acc testutil.Accumulator
 
-    dnsConfig.Gather(&acc)
-    metric, _ := acc.Get("dns_query")
+    err := dnsConfig.Gather(&acc)
+    assert.NoError(t, err)
+    metric, ok := acc.Get("dns_query")
+    assert.True(t, ok)
     queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
     assert.NotEqual(t, 0, queryTime)
@@ -33,8 +35,10 @@ func TestGatheringMxRecord(t *testing.T) {
     var acc testutil.Accumulator
     dnsConfig.RecordType = "MX"
 
-    dnsConfig.Gather(&acc)
-    metric, _ := acc.Get("dns_query")
+    err := dnsConfig.Gather(&acc)
+    assert.NoError(t, err)
+    metric, ok := acc.Get("dns_query")
+    assert.True(t, ok)
     queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
     assert.NotEqual(t, 0, queryTime)
@@ -54,8 +58,10 @@ func TestGatheringRootDomain(t *testing.T) {
     }
     fields := map[string]interface{}{}
 
-    dnsConfig.Gather(&acc)
-    metric, _ := acc.Get("dns_query")
+    err := dnsConfig.Gather(&acc)
+    assert.NoError(t, err)
+    metric, ok := acc.Get("dns_query")
+    assert.True(t, ok)
     queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
     fields["query_time_ms"] = queryTime
@@ -70,13 +76,15 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) {
     var acc testutil.Accumulator
     tags := map[string]string{
         "server":      "8.8.8.8",
-        "domain":      "mjasion.pl",
+        "domain":      "google.com",
         "record_type": "NS",
     }
     fields := map[string]interface{}{}
 
-    dnsConfig.Gather(&acc)
-    metric, _ := acc.Get("dns_query")
+    err := dnsConfig.Gather(&acc)
+    assert.NoError(t, err)
+    metric, ok := acc.Get("dns_query")
+    assert.True(t, ok)
     queryTime, _ := metric.Fields["query_time_ms"].(float64)
 
     fields["query_time_ms"] = queryTime
```
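The DNS Query input added in #694 is driven by the same knobs these tests set (servers, domains, record type). A hedged config sketch; the TOML key names are assumed from the test's DnsQuery fields, not confirmed by this diff:

```toml
[[inputs.dns_query]]
  ## DNS servers to query, as exercised by the tests above
  servers = ["8.8.8.8"]
  domains = ["google.com"]
  ## record type to look up: "A", "MX", "NS", ...
  record_type = "NS"
```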
```diff
@@ -1,7 +1,6 @@
 package httpjson
 
 import (
-    "bytes"
     "errors"
     "fmt"
     "io/ioutil"
@@ -23,7 +22,8 @@ type HttpJson struct {
     TagKeys    []string
     Parameters map[string]string
     Headers    map[string]string
-    client     HTTPClient
+
+    client HTTPClient
 }
 
 type HTTPClient interface {
@@ -182,15 +182,14 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
         return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL)
     }
 
-    params := url.Values{}
+    data := url.Values{}
 
     switch {
     case h.Method == "GET":
-        requestURL.RawQuery = params.Encode()
+        params := requestURL.Query()
+        for k, v := range h.Parameters {
+            params.Add(k, v)
+        }
+        requestURL.RawQuery = params.Encode()
 
     case h.Method == "POST":
         requestURL.RawQuery = ""
@@ -200,7 +199,8 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) {
     }
 
     // Create + send request
-    req, err := http.NewRequest(h.Method, requestURL.String(), bytes.NewBufferString(data.Encode()))
+    req, err := http.NewRequest(h.Method, requestURL.String(),
+        strings.NewReader(data.Encode()))
     if err != nil {
         return "", -1, err
     }
```
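The practical effect of the #746 fix: for GET requests, configured parameters are now merged into any query string already present in the server URL instead of replacing it. A hedged sketch of the plugin config this enables; the option names and the parameters sub-table syntax are assumed from the struct and the tests below, not spelled out in this diff:

```toml
[[inputs.httpjson]]
  name = "webserver_stats"
  method = "GET"
  ## the api_key already embedded in the URL now survives the request...
  servers = ["http://localhost:9999/stats/?api_key=mykey"]
  ## ...and these parameters are appended alongside it
  [inputs.httpjson.parameters]
    threshold = "0.75"
```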
```diff
@@ -1,8 +1,10 @@
 package httpjson
 
 import (
+    "fmt"
+    "io/ioutil"
     "net/http"
     "net/http/httptest"
     "strings"
     "testing"
 
@@ -27,6 +29,75 @@ const validJSON = `
     "another_list": [4]
 }`
 
+const validJSON2 = `{
+    "user":{
+        "hash_rate":0,
+        "expected_24h_rewards":0,
+        "total_rewards":0.000595109232,
+        "paid_rewards":0,
+        "unpaid_rewards":0.000595109232,
+        "past_24h_rewards":0,
+        "total_work":"5172625408",
+        "blocks_found":0
+    },
+    "workers":{
+        "brminer.1":{
+            "hash_rate":0,
+            "hash_rate_24h":0,
+            "valid_shares":"6176",
+            "stale_shares":"0",
+            "invalid_shares":"0",
+            "rewards":4.5506464e-5,
+            "rewards_24h":0,
+            "reset_time":1455409950
+        },
+        "brminer.2":{
+            "hash_rate":0,
+            "hash_rate_24h":0,
+            "valid_shares":"0",
+            "stale_shares":"0",
+            "invalid_shares":"0",
+            "rewards":0,
+            "rewards_24h":0,
+            "reset_time":1455936726
+        },
+        "brminer.3":{
+            "hash_rate":0,
+            "hash_rate_24h":0,
+            "valid_shares":"0",
+            "stale_shares":"0",
+            "invalid_shares":"0",
+            "rewards":0,
+            "rewards_24h":0,
+            "reset_time":1455936733
+        }
+    },
+    "pool":{
+        "hash_rate":114100000,
+        "active_users":843,
+        "total_work":"5015346808842682368",
+        "pps_ratio":1.04,
+        "pps_rate":7.655e-9
+    },
+    "network":{
+        "hash_rate":1426117703,
+        "block_number":944895,
+        "time_per_block":156,
+        "difficulty":51825.72835216,
+        "next_difficulty":51916.15249019,
+        "retarget_time":95053
+    },
+    "market":{
+        "ltc_btc":0.00798,
+        "ltc_usd":3.37801,
+        "ltc_eur":3.113,
+        "ltc_gbp":2.32807,
+        "ltc_rub":241.796,
+        "ltc_cny":21.3883,
+        "btc_usd":422.852
+    }
+}`
+
 const validJSONTags = `
 {
     "value": 15,
@@ -149,6 +220,222 @@ func TestHttpJson200(t *testing.T) {
     }
 }
 
+// Test that GET Parameters from the url string are applied properly
+func TestHttpJsonGET_URL(t *testing.T) {
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        key := r.FormValue("api_key")
+        assert.Equal(t, "mykey", key)
+        w.WriteHeader(http.StatusOK)
+        fmt.Fprintln(w, validJSON2)
+    }))
+    defer ts.Close()
+
+    a := HttpJson{
+        Servers: []string{ts.URL + "?api_key=mykey"},
+        Name:    "",
+        Method:  "GET",
+        client:  RealHTTPClient{client: &http.Client{}},
+    }
+
+    var acc testutil.Accumulator
+    err := a.Gather(&acc)
+    require.NoError(t, err)
+
+    // remove response_time from gathered fields because it's non-deterministic
+    delete(acc.Metrics[0].Fields, "response_time")
+
+    fields := map[string]interface{}{
+        "market_btc_usd": float64(422.852),
+        "market_ltc_btc": float64(0.00798),
+        "market_ltc_cny": float64(21.3883),
+        "market_ltc_eur": float64(3.113),
+        "market_ltc_gbp": float64(2.32807),
+        "market_ltc_rub": float64(241.796),
+        "market_ltc_usd": float64(3.37801),
+        "network_block_number": float64(944895),
+        "network_difficulty": float64(51825.72835216),
+        "network_hash_rate": float64(1.426117703e+09),
+        "network_next_difficulty": float64(51916.15249019),
+        "network_retarget_time": float64(95053),
+        "network_time_per_block": float64(156),
+        "pool_active_users": float64(843),
+        "pool_hash_rate": float64(1.141e+08),
+        "pool_pps_rate": float64(7.655e-09),
+        "pool_pps_ratio": float64(1.04),
+        "user_blocks_found": float64(0),
+        "user_expected_24h_rewards": float64(0),
+        "user_hash_rate": float64(0),
+        "user_paid_rewards": float64(0),
+        "user_past_24h_rewards": float64(0),
+        "user_total_rewards": float64(0.000595109232),
+        "user_unpaid_rewards": float64(0.000595109232),
+        "workers_brminer.1_hash_rate": float64(0),
+        "workers_brminer.1_hash_rate_24h": float64(0),
+        "workers_brminer.1_reset_time": float64(1.45540995e+09),
+        "workers_brminer.1_rewards": float64(4.5506464e-05),
+        "workers_brminer.1_rewards_24h": float64(0),
+        "workers_brminer.2_hash_rate": float64(0),
+        "workers_brminer.2_hash_rate_24h": float64(0),
+        "workers_brminer.2_reset_time": float64(1.455936726e+09),
+        "workers_brminer.2_rewards": float64(0),
+        "workers_brminer.2_rewards_24h": float64(0),
+        "workers_brminer.3_hash_rate": float64(0),
+        "workers_brminer.3_hash_rate_24h": float64(0),
+        "workers_brminer.3_reset_time": float64(1.455936733e+09),
+        "workers_brminer.3_rewards": float64(0),
+        "workers_brminer.3_rewards_24h": float64(0),
+    }
+
+    acc.AssertContainsFields(t, "httpjson", fields)
+}
+
+// Test that GET Parameters are applied properly
+func TestHttpJsonGET(t *testing.T) {
+    params := map[string]string{
+        "api_key": "mykey",
+    }
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        key := r.FormValue("api_key")
+        assert.Equal(t, "mykey", key)
+        w.WriteHeader(http.StatusOK)
+        fmt.Fprintln(w, validJSON2)
+    }))
+    defer ts.Close()
+
+    a := HttpJson{
+        Servers:    []string{ts.URL},
+        Name:       "",
+        Method:     "GET",
+        Parameters: params,
+        client:     RealHTTPClient{client: &http.Client{}},
+    }
+
+    var acc testutil.Accumulator
+    err := a.Gather(&acc)
+    require.NoError(t, err)
+
+    // remove response_time from gathered fields because it's non-deterministic
+    delete(acc.Metrics[0].Fields, "response_time")
+
+    fields := map[string]interface{}{
+        "market_btc_usd": float64(422.852),
+        "market_ltc_btc": float64(0.00798),
+        "market_ltc_cny": float64(21.3883),
+        "market_ltc_eur": float64(3.113),
+        "market_ltc_gbp": float64(2.32807),
+        "market_ltc_rub": float64(241.796),
+        "market_ltc_usd": float64(3.37801),
+        "network_block_number": float64(944895),
+        "network_difficulty": float64(51825.72835216),
+        "network_hash_rate": float64(1.426117703e+09),
+        "network_next_difficulty": float64(51916.15249019),
+        "network_retarget_time": float64(95053),
+        "network_time_per_block": float64(156),
+        "pool_active_users": float64(843),
+        "pool_hash_rate": float64(1.141e+08),
+        "pool_pps_rate": float64(7.655e-09),
+        "pool_pps_ratio": float64(1.04),
+        "user_blocks_found": float64(0),
+        "user_expected_24h_rewards": float64(0),
+        "user_hash_rate": float64(0),
+        "user_paid_rewards": float64(0),
+        "user_past_24h_rewards": float64(0),
+        "user_total_rewards": float64(0.000595109232),
+        "user_unpaid_rewards": float64(0.000595109232),
+        "workers_brminer.1_hash_rate": float64(0),
+        "workers_brminer.1_hash_rate_24h": float64(0),
+        "workers_brminer.1_reset_time": float64(1.45540995e+09),
+        "workers_brminer.1_rewards": float64(4.5506464e-05),
+        "workers_brminer.1_rewards_24h": float64(0),
+        "workers_brminer.2_hash_rate": float64(0),
+        "workers_brminer.2_hash_rate_24h": float64(0),
+        "workers_brminer.2_reset_time": float64(1.455936726e+09),
+        "workers_brminer.2_rewards": float64(0),
+        "workers_brminer.2_rewards_24h": float64(0),
+        "workers_brminer.3_hash_rate": float64(0),
+        "workers_brminer.3_hash_rate_24h": float64(0),
+        "workers_brminer.3_reset_time": float64(1.455936733e+09),
+        "workers_brminer.3_rewards": float64(0),
+        "workers_brminer.3_rewards_24h": float64(0),
+    }
+
+    acc.AssertContainsFields(t, "httpjson", fields)
+}
+
+// Test that POST Parameters are applied properly
+func TestHttpJsonPOST(t *testing.T) {
+    params := map[string]string{
+        "api_key": "mykey",
+    }
+    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        body, err := ioutil.ReadAll(r.Body)
+        assert.NoError(t, err)
+        assert.Equal(t, "api_key=mykey", string(body))
+        w.WriteHeader(http.StatusOK)
+        fmt.Fprintln(w, validJSON2)
+    }))
+    defer ts.Close()
+
+    a := HttpJson{
+        Servers:    []string{ts.URL},
+        Name:       "",
+        Method:     "POST",
+        Parameters: params,
+        client:     RealHTTPClient{client: &http.Client{}},
+    }
+
+    var acc testutil.Accumulator
+    err := a.Gather(&acc)
+    require.NoError(t, err)
+
+    // remove response_time from gathered fields because it's non-deterministic
+    delete(acc.Metrics[0].Fields, "response_time")
+
+    fields := map[string]interface{}{
+        "market_btc_usd": float64(422.852),
+        "market_ltc_btc": float64(0.00798),
+        "market_ltc_cny": float64(21.3883),
+        "market_ltc_eur": float64(3.113),
+        "market_ltc_gbp": float64(2.32807),
+        "market_ltc_rub": float64(241.796),
+        "market_ltc_usd": float64(3.37801),
+        "network_block_number": float64(944895),
+        "network_difficulty": float64(51825.72835216),
+        "network_hash_rate": float64(1.426117703e+09),
+        "network_next_difficulty": float64(51916.15249019),
+        "network_retarget_time": float64(95053),
+        "network_time_per_block": float64(156),
+        "pool_active_users": float64(843),
+        "pool_hash_rate": float64(1.141e+08),
+        "pool_pps_rate": float64(7.655e-09),
+        "pool_pps_ratio": float64(1.04),
+        "user_blocks_found": float64(0),
+        "user_expected_24h_rewards": float64(0),
+        "user_hash_rate": float64(0),
+        "user_paid_rewards": float64(0),
+        "user_past_24h_rewards": float64(0),
+        "user_total_rewards": float64(0.000595109232),
+        "user_unpaid_rewards": float64(0.000595109232),
+        "workers_brminer.1_hash_rate": float64(0),
+        "workers_brminer.1_hash_rate_24h": float64(0),
+        "workers_brminer.1_reset_time": float64(1.45540995e+09),
+        "workers_brminer.1_rewards": float64(4.5506464e-05),
+        "workers_brminer.1_rewards_24h": float64(0),
+        "workers_brminer.2_hash_rate": float64(0),
+        "workers_brminer.2_hash_rate_24h": float64(0),
+        "workers_brminer.2_reset_time": float64(1.455936726e+09),
+        "workers_brminer.2_rewards": float64(0),
+        "workers_brminer.2_rewards_24h": float64(0),
+        "workers_brminer.3_hash_rate": float64(0),
+        "workers_brminer.3_hash_rate_24h": float64(0),
+        "workers_brminer.3_reset_time": float64(1.455936733e+09),
+        "workers_brminer.3_rewards": float64(0),
+        "workers_brminer.3_rewards_24h": float64(0),
+    }
+
+    acc.AssertContainsFields(t, "httpjson", fields)
+}
+
 // Test response to HTTP 500
 func TestHttpJson500(t *testing.T) {
     httpjson := genMockHttpJson(validJSON, 500)
```
```diff
@@ -17,7 +17,11 @@ import (
     "github.com/influxdata/telegraf/plugins/inputs"
 )
 
-const UDP_PACKET_SIZE int = 1500
+const (
+    UDP_PACKET_SIZE int = 1500
+
+    defaultFieldName = "value"
+)
 
 var dropwarn = "ERROR: Message queue full. Discarding line [%s] " +
     "You may want to increase allowed_pending_messages in the config\n"
@@ -113,9 +117,9 @@ type cachedcounter struct {
 }
 
 type cachedtimings struct {
-    name  string
-    stats RunningStats
-    tags  map[string]string
+    name   string
+    fields map[string]RunningStats
+    tags   map[string]string
 }
 
 func (_ *Statsd) Description() string {
@@ -169,16 +173,26 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error {
     now := time.Now()
 
     for _, metric := range s.timings {
+        // Defining a template to parse field names for timers allows us to split
+        // out multiple fields per timer. In this case we prefix each stat with the
+        // field name and store these all in a single measurement.
         fields := make(map[string]interface{})
-        fields["mean"] = metric.stats.Mean()
-        fields["stddev"] = metric.stats.Stddev()
-        fields["upper"] = metric.stats.Upper()
-        fields["lower"] = metric.stats.Lower()
-        fields["count"] = metric.stats.Count()
-        for _, percentile := range s.Percentiles {
-            name := fmt.Sprintf("%v_percentile", percentile)
-            fields[name] = metric.stats.Percentile(percentile)
+        for fieldName, stats := range metric.fields {
+            var prefix string
+            if fieldName != defaultFieldName {
+                prefix = fieldName + "_"
+            }
+            fields[prefix+"mean"] = stats.Mean()
+            fields[prefix+"stddev"] = stats.Stddev()
+            fields[prefix+"upper"] = stats.Upper()
+            fields[prefix+"lower"] = stats.Lower()
+            fields[prefix+"count"] = stats.Count()
+            for _, percentile := range s.Percentiles {
+                name := fmt.Sprintf("%s%v_percentile", prefix, percentile)
+                fields[name] = stats.Percentile(percentile)
+            }
         }
 
         acc.AddFields(metric.name, fields, metric.tags, now)
     }
     if s.DeleteTimings {
@@ -370,11 +384,6 @@ func (s *Statsd) parseStatsdLine(line string) error {
 
     // Parse the name & tags from bucket
     m.name, m.field, m.tags = s.parseName(m.bucket)
-    // fields are not supported for timings, so if specified combine into
-    // the name
-    if (m.mtype == "ms" || m.mtype == "h") && m.field != "value" {
-        m.name += "_" + m.field
-    }
     switch m.mtype {
     case "c":
         m.tags["metric_type"] = "counter"
@@ -433,7 +442,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) {
         name = strings.Replace(name, "-", "__", -1)
     }
     if field == "" {
-        field = "value"
+        field = defaultFieldName
     }
 
     return name, field, tags
@@ -461,26 +470,32 @@ func parseKeyValue(keyvalue string) (string, string) {
 func (s *Statsd) aggregate(m metric) {
     switch m.mtype {
     case "ms", "h":
         // Check if the measurement exists
        cached, ok := s.timings[m.hash]
         if !ok {
             cached = cachedtimings{
-                name: m.name,
-                tags: m.tags,
-                stats: RunningStats{
-                    PercLimit: s.PercentileLimit,
-                },
+                name:   m.name,
+                fields: make(map[string]RunningStats),
+                tags:   m.tags,
             }
         }
+        // Check if the field exists. If we've not enabled multiple fields per timer
+        // this will be the default field name, eg. "value"
+        field, ok := cached.fields[m.field]
+        if !ok {
+            field = RunningStats{
+                PercLimit: s.PercentileLimit,
+            }
+        }
 
         if m.samplerate > 0 {
             for i := 0; i < int(1.0/m.samplerate); i++ {
-                cached.stats.AddValue(m.floatvalue)
+                field.AddValue(m.floatvalue)
             }
-            s.timings[m.hash] = cached
         } else {
-            cached.stats.AddValue(m.floatvalue)
-            s.timings[m.hash] = cached
+            field.AddValue(m.floatvalue)
         }
+        cached.fields[m.field] = field
+        s.timings[m.hash] = cached
+
     case "c":
         // check if the measurement exists
         _, ok := s.counters[m.hash]
```
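To make the multiple-fields change concrete: with a template that parses the last bucket segment as the field, the timer stats get a per-field prefix within a single measurement. A hedged config sketch matching the tests that follow; the TOML key names are assumed from the plugin's exported fields (Templates, Percentiles), not shown in this diff:

```toml
[[inputs.statsd]]
  ## "test_timing.success:1|ms" now becomes measurement "test_timing" with
  ## fields success_mean, success_upper, success_count, success_90_percentile, ...
  templates = ["measurement.field"]
  percentiles = [90]
```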
```diff
@@ -561,12 +561,12 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) {
     // A 0 with invalid samplerate will add a single 0,
     // plus the last bit of value 1
     // which adds up to 12 individual datapoints to be cached
-    if cachedtiming.stats.n != 12 {
-        t.Errorf("Expected 11 additions, got %d", cachedtiming.stats.n)
+    if cachedtiming.fields[defaultFieldName].n != 12 {
+        t.Errorf("Expected 11 additions, got %d", cachedtiming.fields[defaultFieldName].n)
     }
 
-    if cachedtiming.stats.upper != 1 {
-        t.Errorf("Expected max input to be 1, got %f", cachedtiming.stats.upper)
+    if cachedtiming.fields[defaultFieldName].upper != 1 {
+        t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper)
     }
 }
@@ -842,7 +842,105 @@ func TestParse_Timings(t *testing.T) {
     }
 
     acc.AssertContainsFields(t, "test_timing", valid)
 }
 
+// Tests low-level functionality of timings when multiple fields is enabled
+// and a measurement template has been defined which can parse field names
+func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) {
+    s := NewStatsd()
+    s.Templates = []string{"measurement.field"}
+    s.Percentiles = []int{90}
+    acc := &testutil.Accumulator{}
+
+    validLines := []string{
+        "test_timing.success:1|ms",
+        "test_timing.success:11|ms",
+        "test_timing.success:1|ms",
+        "test_timing.success:1|ms",
+        "test_timing.success:1|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:22|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:2|ms",
+    }
+
+    for _, line := range validLines {
+        err := s.parseStatsdLine(line)
+        if err != nil {
+            t.Errorf("Parsing line %s should not have resulted in an error\n", line)
+        }
+    }
+    s.Gather(acc)
+
+    valid := map[string]interface{}{
+        "success_90_percentile": float64(11),
+        "success_count":         int64(5),
+        "success_lower":         float64(1),
+        "success_mean":          float64(3),
+        "success_stddev":        float64(4),
+        "success_upper":         float64(11),
+
+        "error_90_percentile": float64(22),
+        "error_count":         int64(5),
+        "error_lower":         float64(2),
+        "error_mean":          float64(6),
+        "error_stddev":        float64(8),
+        "error_upper":         float64(22),
+    }
+
+    acc.AssertContainsFields(t, "test_timing", valid)
+}
+
+// Tests low-level functionality of timings when multiple fields is enabled
+// but a measurement template hasn't been defined so we can't parse field names
+// In this case the behaviour should be the same as normal behaviour
+func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) {
+    s := NewStatsd()
+    s.Templates = []string{}
+    s.Percentiles = []int{90}
+    acc := &testutil.Accumulator{}
+
+    validLines := []string{
+        "test_timing.success:1|ms",
+        "test_timing.success:11|ms",
+        "test_timing.success:1|ms",
+        "test_timing.success:1|ms",
+        "test_timing.success:1|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:22|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:2|ms",
+        "test_timing.error:2|ms",
+    }
+
+    for _, line := range validLines {
+        err := s.parseStatsdLine(line)
+        if err != nil {
+            t.Errorf("Parsing line %s should not have resulted in an error\n", line)
+        }
+    }
+    s.Gather(acc)
+
+    expectedSuccess := map[string]interface{}{
+        "90_percentile": float64(11),
+        "count":         int64(5),
+        "lower":         float64(1),
+        "mean":          float64(3),
+        "stddev":        float64(4),
+        "upper":         float64(11),
+    }
+    expectedError := map[string]interface{}{
+        "90_percentile": float64(22),
+        "count":         int64(5),
+        "lower":         float64(2),
+        "mean":          float64(6),
+        "stddev":        float64(8),
+        "upper":         float64(22),
+    }
+
+    acc.AssertContainsFields(t, "test_timing_success", expectedSuccess)
+    acc.AssertContainsFields(t, "test_timing_error", expectedError)
+}
+
 func TestParse_Timings_Delete(t *testing.T) {
```
scripts/build.py (103 changes)
```diff
@@ -30,6 +30,7 @@ INIT_SCRIPT = "scripts/init.sh"
 SYSTEMD_SCRIPT = "scripts/telegraf.service"
 LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
 DEFAULT_CONFIG = "etc/telegraf.conf"
+DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
 POSTINST_SCRIPT = "scripts/post-install.sh"
 PREINST_SCRIPT = "scripts/pre-install.sh"
 
@@ -76,7 +77,7 @@ supported_builds = {
 supported_packages = {
     "darwin": [ "tar", "zip" ],
     "linux": [ "deb", "rpm", "tar", "zip" ],
-    "windows": [ "tar", "zip" ],
+    "windows": [ "zip" ],
 }
 supported_tags = {
     # "linux": {
@@ -351,20 +352,25 @@ def create_package_fs(build_root):
         create_dir(os.path.join(build_root, d))
         os.chmod(os.path.join(build_root, d), 0o755)
 
-def package_scripts(build_root):
+def package_scripts(build_root, windows=False):
     print("\t- Copying scripts and sample configuration to build directory")
-    shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
-    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
-    shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
-    os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
-    shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
-    os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
-    shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
-    os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
+    if windows:
+        shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
+        os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
+    else:
+        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
+        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
+        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
+        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
+        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
+        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
+        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
+        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
 
 def go_get():
     print("Retrieving Go dependencies...")
     run("go get github.com/sparrc/gdm")
-    run("gdm restore -f Godeps_windows")
+    run("gdm restore")
 
 def generate_md5_from_file(path):
@@ -395,15 +401,20 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter
             build_root = os.path.join(tmp_build_dir, p, a)
             # Create directory tree to mimic file system of package
             create_dir(build_root)
-            create_package_fs(build_root)
-            # Copy in packaging and miscellaneous scripts
-            package_scripts(build_root)
+            if p == 'windows':
+                package_scripts(build_root, windows=True)
+            else:
+                create_package_fs(build_root)
+                # Copy in packaging and miscellaneous scripts
+                package_scripts(build_root)
             # Copy newly-built binaries to packaging directory
             for b in targets:
                 if p == 'windows':
                     b = b + '.exe'
+                    to = os.path.join(build_root, b)
+                else:
+                    to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
                 fr = os.path.join(current_location, b)
-                to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], b)
                 print("\t- [{}][{}] - Moving from '{}' to '{}'".format(p, a, fr, to))
                 copy_file(fr, to)
             # Package the directory structure
@@ -431,34 +442,44 @@ def build_packages(build_output, version, pkg_arch, nightly=False, rc=None, iter
                         a = pkg_arch
                     if a == '386':
                         a = 'i386'
-                    fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
-                        fpm_common_args,
-                        name,
-                        a,
-                        package_type,
-                        package_version,
-                        package_iteration,
-                        build_root,
-                        current_location)
-                    if pkg_arch is not None:
-                        a = saved_a
-                    if package_type == "rpm":
-                        fpm_command += "--depends coreutils "
-                        fpm_command += "--depends lsof"
-                    out = run(fpm_command, shell=True)
-                    matches = re.search(':path=>"(.*)"', out)
-                    outfile = None
-                    if matches is not None:
-                        outfile = matches.groups()[0]
-                    if outfile is None:
-                        print("[ COULD NOT DETERMINE OUTPUT ]")
-                    else:
-                        # Strip nightly version (the unix epoch) from filename
-                        if nightly and package_type in ['deb', 'rpm']:
-                            outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
-                        outfiles.append(os.path.join(os.getcwd(), outfile))
-                        # Display MD5 hash for generated package
+                    if package_type == 'zip':
+                        zip_command = "cd {} && zip {}.zip ./*".format(
+                            build_root,
+                            name)
+                        run(zip_command, shell=True)
+                        run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True)
+                        outfile = os.path.join(current_location, name+".zip")
+                        outfiles.append(outfile)
+                        print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
+                    else:
+                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
+                            fpm_common_args,
+                            name,
+                            a,
+                            package_type,
+                            package_version,
+                            package_iteration,
+                            build_root,
+                            current_location)
+                        if pkg_arch is not None:
+                            a = saved_a
+                        if package_type == "rpm":
+                            fpm_command += "--depends coreutils "
+                            fpm_command += "--depends lsof"
+                        out = run(fpm_command, shell=True)
+                        matches = re.search(':path=>"(.*)"', out)
+                        outfile = None
+                        if matches is not None:
+                            outfile = matches.groups()[0]
+                        if outfile is None:
+                            print("[ COULD NOT DETERMINE OUTPUT ]")
+                        else:
+                            # Strip nightly version (the unix epoch) from filename
+                            if nightly and package_type in ['deb', 'rpm']:
+                                outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly"))
+                            outfiles.append(os.path.join(os.getcwd(), outfile))
+                            # Display MD5 hash for generated package
+                            print("\t\tMD5 = {}".format(generate_md5_from_file(outfile)))
         print("")
         if debug:
             print("[DEBUG] package outfiles: {}".format(outfiles))
```
```diff
@@ -76,5 +76,6 @@ if [ $? -eq 0 ]; then
   tag=$(git describe --exact-match HEAD)
   echo $tag
   exit_if_fail ./scripts/build.py --package --version=$tag --platform=linux --arch=all --upload
+  exit_if_fail ./scripts/build.py --package --version=$tag --platform=windows --arch=all --upload
   mv build $CIRCLE_ARTIFACTS
 fi
```
```diff
@@ -133,13 +133,7 @@ func (a *Accumulator) AssertContainsTaggedFields(
     }
 
         if p.Measurement == measurement {
-            if !reflect.DeepEqual(fields, p.Fields) {
-                pActual, _ := json.MarshalIndent(p.Fields, "", " ")
-                pExp, _ := json.MarshalIndent(fields, "", " ")
-                msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)",
-                    string(pActual), p.Fields, string(pExp), fields)
-                assert.Fail(t, msg)
-            }
+            assert.Equal(t, fields, p.Fields)
             return
         }
     }
@@ -156,13 +150,7 @@ func (a *Accumulator) AssertContainsFields(
     defer a.Unlock()
     for _, p := range a.Metrics {
         if p.Measurement == measurement {
-            if !reflect.DeepEqual(fields, p.Fields) {
-                pActual, _ := json.MarshalIndent(p.Fields, "", " ")
-                pExp, _ := json.MarshalIndent(fields, "", " ")
-                msg := fmt.Sprintf("Actual:\n%s\n(%T) \nExpected:\n%s\n(%T)",
-                    string(pActual), p.Fields, string(pExp), fields)
-                assert.Fail(t, msg)
-            }
+            assert.Equal(t, fields, p.Fields)
             return
         }
     }
```