Compare commits


13 Commits

Author SHA1 Message Date
Cameron Sparr
90a98c76a0 Finalize 0.13 release 2016-05-11 13:41:32 -07:00
Cameron Sparr
12357ee8c5 processes: add 'unknown' procs (?) 2016-05-11 11:52:29 -07:00
Cameron Sparr
bb254fc2b9 Default docker timeout in case one is not defined in config 2016-05-10 14:18:55 -07:00
Cameron Sparr
aeadc2c43a Update etc/telegraf.conf, mqtt_cons readme 2016-05-10 14:18:55 -07:00
Cameron Sparr
ed492fe950 update influxdb & gopsutil deps 2016-05-10 14:18:55 -07:00
Cameron Sparr
775daba8f5 Change Version->version for consistency w/ influxdb 2016-05-10 14:18:55 -07:00
Cameron Sparr
677dd7ad53 Release 0.13 2016-05-10 14:18:55 -07:00
Cameron Sparr
85dee02a3b snmp plugin: change host -> snmp_host
closes #1156
2016-05-10 14:18:00 -07:00
Cameron Sparr
afdebbc3a2 Make OidInstanceMapping a field of the snmp host
fixes #1171
2016-05-10 10:15:01 -07:00
Jörg Thalheim
5deb22a539 docker: add container_id also to per cpu stats
currently this field exists only for total cpu usage

closes #1168
2016-05-09 16:43:27 -07:00
Ross McDonald
36b9e2e077 Merge pull request #1157 from influxdata/ross-build-updates
Minor fixes to build script
2016-05-06 11:28:48 -05:00
Ross McDonald
5348937c3d Choose correct configuration when building for windows. 2016-05-06 10:46:29 -05:00
Ross McDonald
72fcacbbc7 Minor fixes to build script:
- Fix for --name build parameter
- Remove rc parameter from build script
- Fix regression on first-level tarball directory structure
- Convert any dashes/underscores in version tag to tilde
2016-05-05 14:02:34 -05:00
16 changed files with 157 additions and 497 deletions

View File

@@ -1,4 +1,4 @@
## v0.13 [unreleased]
## v0.13 [2016-05-11]
### Release Notes
@@ -48,7 +48,15 @@ based on _prefix_ in addition to globs. This means that a filter like
- disque: `host -> disque_host`
- rethinkdb: `host -> rethinkdb_host`
- **Breaking Change**: The `win_perf_counters` input has been changed to
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
spaces with underscores. This is needed because Graphite doesn't like slashes
and spaces, and was failing to accept metrics that had them.
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.
- **Breaking Change**: snmp plugin. The `host` tag has been renamed to `snmp_host`.
- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
This value is prepended to any mount points discovered before retrieving stats, but is not included in the reported path.
This is necessary for reporting host disk stats when running from within a container (see the sketch below).
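
As an illustration of the prefix behavior described in the last bullet, here is a minimal, hedged sketch. It assumes gopsutil's `disk.Usage` (which the vendored dependency provides); the plugin's actual implementation is not part of this diff, so the paths and variable names are illustrative only:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/shirou/gopsutil/disk"
)

func main() {
	// e.g. HOST_MOUNT_PREFIX=/hostfs when the host's / is bind-mounted
	// read-only into the container at /hostfs.
	prefix := os.Getenv("HOST_MOUNT_PREFIX")
	mountpoint := "/"

	// Stats are gathered from the prefixed path...
	usage, err := disk.Usage(filepath.Join(prefix, mountpoint))
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// ...but reported under the original mount point, without the prefix.
	fmt.Printf("path=%s used=%d free=%d\n", mountpoint, usage.Used, usage.Free)
}
```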

Godeps
View File

@@ -25,7 +25,7 @@ github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb 21db76b3374c733f37ed16ad93f3484020034351
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
@@ -42,7 +42,7 @@ github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/gopsutil 37d89088411de59a4ef9fc340afa0e89dfcb4ea9
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c

View File

@@ -14,21 +14,21 @@ windows: prepare-windows build-windows
# Only run the build (no dependency grabbing)
build:
go install -ldflags "-X main.Version=$(VERSION)" ./...
go install -ldflags "-X main.version=$(VERSION)" ./...
build-windows:
go build -o telegraf.exe -ldflags \
"-X main.Version=$(VERSION)" \
"-X main.version=$(VERSION)" \
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.Version=$(VERSION)" \
"-s -X main.version=$(VERSION)" \
./cmd/telegraf/telegraf.go
# Build with race detector
dev: prepare
go build -race -ldflags "-X main.Version=$(VERSION)" ./...
go build -race -ldflags "-X main.version=$(VERSION)" ./...
# run package script
package:

View File

@@ -20,12 +20,12 @@ new plugins.
### Linux deb and rpm Packages:
Latest:
* http://get.influxdb.org/telegraf/telegraf_0.12.1-1_amd64.deb
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1.x86_64.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.x86_64.rpm
Latest (arm):
* http://get.influxdb.org/telegraf/telegraf_0.12.1-1_armhf.deb
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1.armhf.rpm
* https://dl.influxdata.com/telegraf/releases/telegraf_0.13.0-1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1.armhf.rpm
##### Package Instructions:
@@ -46,28 +46,28 @@ to use this repo to install & update telegraf.
### Linux tarballs:
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_amd64.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_i386.tar.gz
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_linux_armhf.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_linux_armhf.tar.gz
##### tarball Instructions:
To install the full directory structure with config file, run:
```
sudo tar -C / -zxvf ./telegraf-0.12.1-1_linux_amd64.tar.gz
sudo tar -C / -zxvf ./telegraf-0.13.0-1_linux_amd64.tar.gz
```
To extract only the binary, run:
```
tar -zxvf telegraf-0.12.1-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
tar -zxvf telegraf-0.13.0-1_linux_amd64.tar.gz --strip-components=3 ./usr/bin/telegraf
```
### FreeBSD tarball:
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_freebsd_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_freebsd_amd64.tar.gz
##### tarball Instructions:
@@ -87,8 +87,8 @@ brew install telegraf
### Windows Binaries (EXPERIMENTAL)
Latest:
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_windows_amd64.zip
* http://get.influxdb.org/telegraf/telegraf-0.12.1-1_windows_i386.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_amd64.zip
* https://dl.influxdata.com/telegraf/releases/telegraf-0.13.0-1_windows_i386.zip
### From Source:

View File

@@ -46,9 +46,13 @@ var fOutputFiltersLegacy = flag.String("outputfilter", "",
var fConfigDirectoryLegacy = flag.String("configdirectory", "",
"directory containing additional *.conf files")
// Telegraf version
// -ldflags "-X main.Version=`git describe --always --tags`"
var Version string
// Telegraf version, populated by the linker.
// i.e. -ldflags "-X main.version=`git describe --always --tags`"
var (
version string
commit string
branch string
)
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
@@ -132,7 +136,7 @@ func main() {
if len(args) > 0 {
switch args[0] {
case "version":
v := fmt.Sprintf("Telegraf - Version %s", Version)
v := fmt.Sprintf("Telegraf - version %s", version)
fmt.Println(v)
return
case "config":
@@ -158,7 +162,7 @@ func main() {
}
if *fVersion {
v := fmt.Sprintf("Telegraf - Version %s", Version)
v := fmt.Sprintf("Telegraf - version %s", version)
fmt.Println(v)
return
}
@@ -251,7 +255,7 @@ func main() {
}
}()
log.Printf("Starting Telegraf (version %s)\n", Version)
log.Printf("Starting Telegraf (version %s)\n", version)
log.Printf("Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("Tags enabled: %s", c.ListTags())

View File

@@ -638,8 +638,8 @@
#
# ## If no servers are specified, then default to 127.0.0.1:1936
# servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
# ## Or you can also use local socket(not work yet)
# ## servers = ["socket://run/haproxy/admin.sock"]
# ## Or you can also use local socket
# ## servers = ["socket:/run/haproxy/admin.sock"]
# # HTTP/HTTPS request given an address a method and a timeout

View File

@@ -19,7 +19,6 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/igloo"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"

View File

@@ -307,7 +307,11 @@ func gatherContainerStats(
for i, percpu := range stat.CPUStats.CPUUsage.PercpuUsage {
percputags := copyTags(tags)
percputags["cpu"] = fmt.Sprintf("cpu%d", i)
acc.AddFields("docker_container_cpu", map[string]interface{}{"usage_total": percpu}, percputags, now)
fields := map[string]interface{}{
"usage_total": percpu,
"container_id": id,
}
acc.AddFields("docker_container_cpu", fields, percputags, now)
}
for network, netstats := range stat.Networks {
@@ -466,6 +470,8 @@ func parseSize(sizeStr string) (int64, error) {
func init() {
inputs.Add("docker", func() telegraf.Input {
return &Docker{}
return &Docker{
Timeout: internal.Duration{Duration: time.Second * 5},
}
})
}
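
For context on the 5s default above, here is a hedged, self-contained sketch of why a zero timeout is risky: an unset `Timeout` config option is Go's zero value, and `net/http` treats a zero `Client.Timeout` as "no timeout", so a stuck Docker daemon could block collection indefinitely. `queryDockerAPI` and the endpoint URL below are hypothetical stand-ins, not the plugin's actual client code:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// queryDockerAPI is a hypothetical stand-in for the plugin's Docker API call.
// With client.Timeout == 0 the request can block forever; a non-zero default
// (like the 5s seeded in init() above) bounds every call.
func queryDockerAPI(timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get("http://127.0.0.1:2375/containers/json")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
	return nil
}

func main() {
	_ = queryDockerAPI(5 * time.Second)
}
```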

View File

@@ -111,13 +111,15 @@ func TestDockerGatherContainerStats(t *testing.T) {
cputags["cpu"] = "cpu0"
cpu0fields := map[string]interface{}{
"usage_total": uint64(1),
"usage_total": uint64(1),
"container_id": "123456789",
}
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu0fields, cputags)
cputags["cpu"] = "cpu1"
cpu1fields := map[string]interface{}{
"usage_total": uint64(1002),
"usage_total": uint64(1002),
"container_id": "123456789",
}
acc.AssertContainsTaggedFields(t, "docker_container_cpu", cpu1fields, cputags)
}
@@ -372,7 +374,8 @@ func TestDockerGatherInfo(t *testing.T) {
acc.AssertContainsTaggedFields(t,
"docker_container_cpu",
map[string]interface{}{
"usage_total": uint64(1231652),
"usage_total": uint64(1231652),
"container_id": "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173",
},
map[string]string{
"container_name": "etcd2",

View File

@@ -1,23 +0,0 @@
# igloo Input Plugin
The igloo plugin "tails" a logfile and parses each log message.
By default, the igloo plugin acts like the following unix tail command:
```
tail -F --lines=0 myfile.log
```
- `-F` means that it will follow the _name_ of the given file, so
that it will be compatible with log-rotated files, and that it will retry on
inaccessible files.
- `--lines=0` means that it will start at the end of the file (unless
the `from_beginning` option is set).
see http://man7.org/linux/man-pages/man1/tail.1.html for more details.
### Configuration:
```toml
```

View File

@@ -1,331 +0,0 @@
package igloo
import (
"fmt"
"log"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/hpcloud/tail"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/globpath"
"github.com/influxdata/telegraf/plugins/inputs"
)
// format of timestamps
const (
rfcFormat string = "%s-%s-%sT%s:%s:%s.%sZ"
)
var (
// regex for finding timestamps
tRe = regexp.MustCompile(`Timestamp=((\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}),(\d+))`)
)
type Tail struct {
Files []string
FromBeginning bool
TagKeys []string
Counters []string
NumFields []string
StrFields []string
numfieldsRe map[string]*regexp.Regexp
strfieldsRe map[string]*regexp.Regexp
countersRe map[string]*regexp.Regexp
tagsRe map[string]*regexp.Regexp
counters map[string]map[string]int64
tailers []*tail.Tail
wg sync.WaitGroup
acc telegraf.Accumulator
sync.Mutex
}
func NewTail() *Tail {
return &Tail{
FromBeginning: false,
}
}
const sampleConfig = `
## logfiles to parse.
##
## These accept standard unix glob matching rules, but with the addition of
## ** as a "super asterisk". ie:
## "/var/log/**.log" -> recursively find all .log files in /var/log
## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
## "/var/log/apache.log" -> just tail the apache log file
##
## See https://github.com/gobwas/glob for more examples
##
files = ["$HOME/sample.log"]
## Read file from beginning.
from_beginning = false
## Each log message is searched for these tag keys in TagKey=Value format.
## Any that are found will be tagged on the resulting influx measurements.
tag_keys = [
"HostLocal",
"ProductName",
"OperationName",
]
## counters are keys which are treated as counters.
## so if counters = ["Result"], then this means that the following ocurrence
## on a log line:
## Result=Success
## would be treated as a counter: Result_Success, and it will be incremented
## for every occurrence, until Telegraf is restarted.
counters = ["Result"]
## num_fields are log line occurrences that are translated into numerical
## fields. ie:
## Duration=1
num_fields = ["Duration", "Attempt"]
## str_fields are log line occurrences that are translated into string fields,
## ie:
## ActivityGUID=0bb03bf4-ae1d-4487-bb6f-311653b35760
str_fields = ["ActivityGUID"]
`
func (t *Tail) SampleConfig() string {
return sampleConfig
}
func (t *Tail) Description() string {
return "Stream an igloo file, like the tail -f command"
}
func (t *Tail) Gather(acc telegraf.Accumulator) error {
return nil
}
func (t *Tail) buildRegexes() error {
t.numfieldsRe = make(map[string]*regexp.Regexp)
t.strfieldsRe = make(map[string]*regexp.Regexp)
t.tagsRe = make(map[string]*regexp.Regexp)
t.countersRe = make(map[string]*regexp.Regexp)
t.counters = make(map[string]map[string]int64)
for _, field := range t.NumFields {
re, err := regexp.Compile(field + `=([0-9\.]+)`)
if err != nil {
return err
}
t.numfieldsRe[field] = re
}
for _, field := range t.StrFields {
re, err := regexp.Compile(field + `=([0-9a-zA-Z\.\-]+)`)
if err != nil {
return err
}
t.strfieldsRe[field] = re
}
for _, field := range t.TagKeys {
re, err := regexp.Compile(field + `=([0-9a-zA-Z\.\-]+)`)
if err != nil {
return err
}
t.tagsRe[field] = re
}
for _, field := range t.Counters {
re, err := regexp.Compile("(" + field + ")" + `=([0-9a-zA-Z\.\-]+)`)
if err != nil {
return err
}
t.countersRe[field] = re
}
return nil
}
func (t *Tail) Start(acc telegraf.Accumulator) error {
t.Lock()
defer t.Unlock()
t.acc = acc
if err := t.buildRegexes(); err != nil {
return err
}
var seek tail.SeekInfo
if !t.FromBeginning {
seek.Whence = 2
seek.Offset = 0
}
var errS string
// Create a "tailer" for each file
for _, filepath := range t.Files {
g, err := globpath.Compile(filepath)
if err != nil {
log.Printf("ERROR Glob %s failed to compile, %s", filepath, err)
}
for file, _ := range g.Match() {
tailer, err := tail.TailFile(file,
tail.Config{
ReOpen: true,
Follow: true,
Location: &seek,
})
if err != nil {
errS += err.Error() + " "
continue
}
// create a goroutine for each "tailer"
go t.receiver(tailer)
t.tailers = append(t.tailers, tailer)
}
}
if errS != "" {
return fmt.Errorf(errS)
}
return nil
}
// this is launched as a goroutine to continuously watch a tailed logfile
// for changes, parse any incoming msgs, and add to the accumulator.
func (t *Tail) receiver(tailer *tail.Tail) {
t.wg.Add(1)
defer t.wg.Done()
var err error
var line *tail.Line
for line = range tailer.Lines {
if line.Err != nil {
log.Printf("ERROR tailing file %s, Error: %s\n",
tailer.Filename, err)
continue
}
err = t.Parse(line.Text)
if err != nil {
log.Printf("ERROR: %s", err)
}
}
}
func (t *Tail) Parse(line string) error {
// find the timestamp:
match := tRe.FindAllStringSubmatch(line, -1)
if len(match) < 1 {
return nil
}
if len(match[0]) < 9 {
return nil
}
// make an rfc3339 timestamp and parse it:
ts, err := time.Parse(time.RFC3339Nano,
fmt.Sprintf(rfcFormat, match[0][2], match[0][3], match[0][4], match[0][5], match[0][6], match[0][7], match[0][8]))
if err != nil {
return nil
}
fields := make(map[string]interface{})
tags := make(map[string]string)
// parse numerical fields:
for name, re := range t.numfieldsRe {
match := re.FindAllStringSubmatch(line, -1)
if len(match) < 1 {
continue
}
if len(match[0]) < 2 {
continue
}
num, err := strconv.ParseFloat(match[0][1], 64)
if err == nil {
fields[name] = num
}
}
// parse string fields:
for name, re := range t.strfieldsRe {
match := re.FindAllStringSubmatch(line, -1)
if len(match) < 1 {
continue
}
if len(match[0]) < 2 {
continue
}
fields[name] = match[0][1]
}
// parse tags:
for name, re := range t.tagsRe {
match := re.FindAllStringSubmatch(line, -1)
if len(match) < 1 {
continue
}
if len(match[0]) < 2 {
continue
}
tags[name] = match[0][1]
}
if len(t.countersRe) > 0 {
// Make a unique key for the measurement name/tags
var tg []string
for k, v := range tags {
tg = append(tg, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tg)
hash := fmt.Sprintf("%s%s", strings.Join(tg, ""), "igloo")
// check if this hash already has a counter map
_, ok := t.counters[hash]
if !ok {
// doesn't have a counter map, so make one
t.counters[hash] = make(map[string]int64)
}
// search for counter matches:
for _, re := range t.countersRe {
match := re.FindAllStringSubmatch(line, -1)
if len(match) < 1 {
continue
}
if len(match[0]) < 3 {
continue
}
counterName := match[0][1] + "_" + match[0][2]
// increment this counter
t.counters[hash][counterName] += 1
// add this counter to the output fields
fields[counterName] = t.counters[hash][counterName]
}
}
t.acc.AddFields("igloo", fields, tags, ts)
return nil
}
func (t *Tail) Stop() {
t.Lock()
defer t.Unlock()
for _, t := range t.tailers {
err := t.Stop()
if err != nil {
log.Printf("ERROR stopping tail on file %s\n", t.Filename)
}
t.Cleanup()
}
t.wg.Wait()
}
func init() {
inputs.Add("igloo", func() telegraf.Input {
return NewTail()
})
}

View File

@@ -21,8 +21,12 @@ The plugin expects messages in the
"sensors/#",
]
## Maximum number of metrics to buffer between collection intervals
metric_buffer = 100000
# if true, messages that can't be delivered while the subscriber is offline
# will be delivered when it comes back (such as on service restart).
# NOTE: if true, client_id MUST be set
persistent_session = false
# If empty, a random client ID will be generated.
client_id = ""
## username and password to connect MQTT server.
# username = "telegraf"

View File

@@ -26,9 +26,6 @@ type Snmp struct {
nameToOid map[string]string
initNode Node
subTableMap map[string]Subtable
// TODO change as unexportable
//OidInstanceMapping map[string]map[string]string
}
type Host struct {
@@ -53,6 +50,8 @@ type Host struct {
// array of processed oids
// to skip oid duplication
processedOids []string
OidInstanceMapping map[string]map[string]string
}
type Table struct {
@@ -116,9 +115,6 @@ type Node struct {
subnodes map[string]Node
}
// TODO move this var to snmp struct
var OidInstanceMapping = make(map[string]map[string]string)
var sampleConfig = `
## Use 'oids.txt' file to translate oids to names
## To generate 'oids.txt' you need to run:
@@ -396,7 +392,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
// TODO save mapping and computed oids
// to do it only the first time
// only if len(s.OidInstanceMapping) == 0
if len(OidInstanceMapping) >= 0 {
if len(host.OidInstanceMapping) >= 0 {
if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil {
log.Printf("SNMP Mapping error for host '%s': %s", host.Address, err)
continue
@@ -413,7 +409,14 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error {
return nil
}
func (h *Host) SNMPMap(acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable) error {
func (h *Host) SNMPMap(
acc telegraf.Accumulator,
nameToOid map[string]string,
subTableMap map[string]Subtable,
) error {
if h.OidInstanceMapping == nil {
h.OidInstanceMapping = make(map[string]map[string]string)
}
// Get snmp client
snmpClient, err := h.GetSNMPClient()
if err != nil {
@@ -523,11 +526,11 @@ func (h *Host) SNMPMap(acc telegraf.Accumulator, nameToOid map[string]string, su
// Building mapping table
mapping := map[string]string{strings.Trim(key, "."): string(variable.Value.([]byte))}
_, exists := OidInstanceMapping[table.oid]
_, exists := h.OidInstanceMapping[table.oid]
if exists {
OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
h.OidInstanceMapping[table.oid][strings.Trim(key, ".")] = string(variable.Value.([]byte))
} else {
OidInstanceMapping[table.oid] = mapping
h.OidInstanceMapping[table.oid] = mapping
}
// Add table oid in bulk oid list
@@ -720,7 +723,12 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) {
return snmpClient, nil
}
func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, acc telegraf.Accumulator, initNode Node) (string, error) {
func (h *Host) HandleResponse(
oids map[string]Data,
result *gosnmp.SnmpPacket,
acc telegraf.Accumulator,
initNode Node,
) (string, error) {
var lastOid string
for _, variable := range result.Variables {
lastOid = variable.Name
@@ -755,7 +763,7 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
strings.Split(string(variable.Name[1:]), "."))
// Set instance tag
// From mapping table
mapping, inMappingNoSubTable := OidInstanceMapping[oid_key]
mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key]
if inMappingNoSubTable {
// filter if the instance is not in
// the OidInstanceMapping map
@@ -784,7 +792,7 @@ func (h *Host) HandleResponse(oids map[string]Data, result *gosnmp.SnmpPacket, a
// Because the result oid is equal to inputs.snmp.get section
field_name = oid.Name
}
tags["host"], _, _ = net.SplitHostPort(h.Address)
tags["snmp_host"], _, _ = net.SplitHostPort(h.Address)
fields := make(map[string]interface{})
fields[string(field_name)] = variable.Value

View File

@@ -102,8 +102,8 @@ func TestSNMPGet1(t *testing.T) {
"oid1": uint(543846),
},
map[string]string{
"unit": "octets",
"host": testutil.GetLocalHost(),
"unit": "octets",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -140,8 +140,8 @@ func TestSNMPGet2(t *testing.T) {
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"host": testutil.GetLocalHost(),
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -180,9 +180,9 @@ func TestSNMPGet3(t *testing.T) {
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -222,9 +222,9 @@ func TestSNMPEasyGet4(t *testing.T) {
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -234,8 +234,8 @@ func TestSNMPEasyGet4(t *testing.T) {
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"host": testutil.GetLocalHost(),
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -275,9 +275,9 @@ func TestSNMPEasyGet5(t *testing.T) {
"ifSpeed": uint(10000000),
},
map[string]string{
"unit": "octets",
"instance": "1",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -287,8 +287,8 @@ func TestSNMPEasyGet5(t *testing.T) {
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"host": testutil.GetLocalHost(),
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -320,8 +320,8 @@ func TestSNMPEasyGet6(t *testing.T) {
"ifNumber": int(4),
},
map[string]string{
"instance": "0",
"host": testutil.GetLocalHost(),
"instance": "0",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -360,9 +360,9 @@ func TestSNMPBulk1(t *testing.T) {
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -372,9 +372,9 @@ func TestSNMPBulk1(t *testing.T) {
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -384,9 +384,9 @@ func TestSNMPBulk1(t *testing.T) {
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -396,9 +396,9 @@ func TestSNMPBulk1(t *testing.T) {
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}
@@ -438,9 +438,9 @@ func dTestSNMPBulk2(t *testing.T) {
"ifOutOctets": uint(543846),
},
map[string]string{
"unit": "octets",
"instance": "1",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "1",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -450,9 +450,9 @@ func dTestSNMPBulk2(t *testing.T) {
"ifOutOctets": uint(26475179),
},
map[string]string{
"unit": "octets",
"instance": "2",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "2",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -462,9 +462,9 @@ func dTestSNMPBulk2(t *testing.T) {
"ifOutOctets": uint(108963968),
},
map[string]string{
"unit": "octets",
"instance": "3",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "3",
"snmp_host": testutil.GetLocalHost(),
},
)
@@ -474,9 +474,9 @@ func dTestSNMPBulk2(t *testing.T) {
"ifOutOctets": uint(12991453),
},
map[string]string{
"unit": "octets",
"instance": "36",
"host": testutil.GetLocalHost(),
"unit": "octets",
"instance": "36",
"snmp_host": testutil.GetLocalHost(),
},
)
}

View File

@@ -70,6 +70,7 @@ func getEmptyFields() map[string]interface{} {
"running": int64(0),
"sleeping": int64(0),
"total": int64(0),
"unknown": int64(0),
}
switch runtime.GOOS {
case "freebsd":
@@ -114,6 +115,8 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error {
fields["sleeping"] = fields["sleeping"].(int64) + int64(1)
case 'I':
fields["idle"] = fields["idle"].(int64) + int64(1)
case '?':
fields["unknown"] = fields["unknown"].(int64) + int64(1)
default:
log.Printf("processes: Unknown state [ %s ] from ps",
string(status[0]))

View File

@@ -132,13 +132,16 @@ def create_package_fs(build_root):
os.makedirs(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root, windows=False):
def package_scripts(build_root, config_only=False, windows=False):
"""Copy the necessary scripts and configuration files to the package
filesystem.
"""
if windows:
logging.info("Copying configuration to build directory.")
shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
if config_only or windows:
logging.info("Copying configuration to build directory")
if windows:
shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
else:
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf"))
os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
else:
logging.info("Copying scripts and configuration to build directory")
@@ -239,21 +242,15 @@ def get_current_version():
"""Parse version information from git tag output.
"""
version_tag = get_current_version_tag()
# Remove leading 'v' and possible '-rc\d+'
# Remove leading 'v'
if version_tag[0] == 'v':
version_tag = version_tag[1:]
version = re.sub(r'-rc\d+', '', str(version_tag))
return version
def get_current_rc():
"""Parse release candidate from git tag output.
"""
rc = None
version_tag = get_current_version_tag()
matches = re.match(r'.*-rc(\d+)', str(version_tag))
if matches:
rc, = matches.groups(1)
return rc
# Replace any '-'/'_' with '~'
if '-' in version_tag:
version_tag = version_tag.replace("-","~")
if '_' in version_tag:
version_tag = version_tag.replace("_","~")
return version_tag
def get_current_commit(short=False):
"""Retrieve the current git commit.
@@ -418,7 +415,6 @@ def build(version=None,
platform=None,
arch=None,
nightly=False,
rc=None,
race=False,
clean=False,
outdir=".",
@@ -445,9 +441,6 @@ def build(version=None,
shutil.rmtree(outdir)
os.makedirs(outdir)
if rc:
# If a release candidate, update the version information accordingly
version = "{}rc{}".format(version, rc)
logging.info("Using version '{}' for build.".format(version))
tmp_build_dir = create_temp_dir()
@@ -540,7 +533,7 @@ def generate_sig_from_file(path):
run('gpg --armor --detach-sign --yes {}'.format(path))
return True
def package(build_output, version, nightly=False, rc=None, iteration=1, static=False, release=False):
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
"""Package the output of the build process.
"""
outfiles = []
@@ -564,10 +557,12 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
os.makedirs(build_root)
# Copy packaging scripts to build directory
if platform == "windows" or static or "static_" in arch:
if platform == "windows":
# For windows and static builds, just copy
# binaries to root of package (no other scripts or
# directories)
package_scripts(build_root, config_only=True, windows=True)
elif static or "static_" in arch:
package_scripts(build_root, config_only=True)
else:
create_package_fs(build_root)
@@ -592,7 +587,7 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
for package_type in supported_packages[platform]:
# Package the directory structure for each package type for the platform
logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
name = PACKAGE_NAME
name = pkg_name
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
@@ -604,17 +599,12 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
package_arch = arch
if not release and not nightly:
# For non-release builds, just use the commit hash as the version
package_version = "{}~{}.{}".format(version,
get_current_branch(),
get_current_commit(short=True))
package_version = "{}~{}".format(version,
get_current_commit(short=True))
package_iteration = "0"
package_build_root = build_root
current_location = build_output[platform][arch]
if rc is not None and release:
# Set iteration to 0 since it's a release candidate
package_iteration = "0.rc{}".format(rc)
if package_type in ['zip', 'tar']:
# For tars and zips, start the packaging one folder above
# the build root (to include the package name)
@@ -639,18 +629,17 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
package_version,
platform,
package_arch)
current_location = os.path.join(os.getcwd(), current_location)
if package_type == 'tar':
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(build_root, name)
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
run(tar_command, shell=True)
run("mv {}.tar.gz {}".format(os.path.join(build_root, name), current_location), shell=True)
run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".tar.gz")
outfiles.append(outfile)
elif package_type == 'zip':
zip_command = "cd {} && zip -r {}.zip ./*".format(build_root, name)
zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
run(zip_command, shell=True)
run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True)
run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".zip")
outfiles.append(outfile)
elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
@@ -681,7 +670,6 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
os.rename(outfile, new_outfile)
outfile = new_outfile
else:
# Strip iteration from package name
if package_type == 'rpm':
# rpm's convert any dashes to underscores
package_version = package_version.replace("-", "_")
@@ -698,9 +686,6 @@ def package(build_output, version, nightly=False, rc=None, iteration=1, static=F
def main(args):
global PACKAGE_NAME
if args.nightly and args.rc:
logging.error("Cannot be both a nightly and a release candidate.")
return 1
if args.release and args.nightly:
logging.error("Cannot be both a nightly and a release.")
return 1
@@ -710,8 +695,6 @@ def main(args):
args.version = "{}~n{}".format(args.version,
datetime.utcnow().strftime("%Y%m%d%H%M"))
args.iteration = 0
elif args.rc:
args.iteration = 0
# Pre-build checks
check_environ()
@@ -778,7 +761,6 @@ def main(args):
platform=platform,
arch=arch,
nightly=args.nightly,
rc=args.rc,
race=args.race,
clean=args.clean,
outdir=od,
@@ -793,9 +775,9 @@ def main(args):
logging.error("FPM ruby gem required for packaging. Stopping.")
return 1
packages = package(build_output,
args.name,
args.version,
nightly=args.nightly,
rc=args.rc,
iteration=args.iteration,
static=args.static,
release=args.release)
@@ -844,6 +826,7 @@ if __name__ == '__main__':
help='Output directory')
parser.add_argument('--name', '-n',
metavar='<name>',
default=PACKAGE_NAME,
type=str,
help='Name to use for package name (when package is specified)')
parser.add_argument('--arch',
@@ -871,14 +854,10 @@ if __name__ == '__main__':
type=str,
default=get_current_version(),
help='Version information to apply to build output (ex: 0.12.0)')
parser.add_argument('--rc',
metavar='<release candidate>',
type=int,
help='Release Candidate (RC) version to apply to build output')
parser.add_argument('--iteration',
metavar='<package iteration>',
type=int,
default=1,
type=str,
default="1",
help='Package iteration to apply to build output (defaults to 1)')
parser.add_argument('--stats',
action='store_true',