Merge branch 'master' of github.com:influxdata/telegraf

This commit is contained in:
Rene Zbinden 2016-04-02 09:27:40 +02:00
commit 05b8159d6b
31 changed files with 1550 additions and 198 deletions

View File

@ -1,6 +1,8 @@
## v0.12.0 [unreleased] ## v0.12.0 [unreleased]
### Features ### Features
- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file.
- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented).
- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension
- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini! - [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini!
- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert! - [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert!
@ -15,6 +17,7 @@
- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent. - [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent.
- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config. - [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug! - [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!
### Bugfixes ### Bugfixes
- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
@ -24,6 +27,7 @@
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert! - [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
## v0.11.1 [2016-03-17] ## v0.11.1 [2016-03-17]

2
Godeps
View File

@ -1,4 +1,3 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687 github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
@ -12,6 +11,7 @@ github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032

View File

@ -1,4 +1,3 @@
git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git 617c801af238c3af2d9e72c5d4a0f02edad03ce5
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9 github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5 github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
@ -6,22 +5,28 @@ github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857 github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4 github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99 github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3 github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367 github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 4ab3e867810d1ec5f35157c59e965054dbf43a0d
github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967 github.com/fsouza/go-dockerclient a49c8269a6899cae30da1f8a4b82e0ce945f9967
github.com/go-ini/ini 776aa739ce9373377cd16f526cdf06cb4c89b40f
github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4 github.com/go-ole/go-ole 50055884d646dd9434f16bbb5c9801749b9bafe4
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032 github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7 github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2 github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e3fef5593c21644f2b43af55d6e17e70910b0e48 github.com/influxdata/influxdb c190778997f4154294e6160c41b90140641ac915
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0 github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720 github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36 github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1 github.com/lxn/win 9a7734ea4db26bc593d52f6a8a957afdad39c5c1
@ -32,15 +37,17 @@ github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3 github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980 github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831 github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37 github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8 github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1de1357e7737a536c7f4ff6be7bd27977db4d2cb github.com/shirou/gopsutil 1f32ce1bb380845be7f5d174ac641a2c592c0c42
github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5 github.com/shirou/w32 ada3ba68f000aa1b58580e45c9d308fe0b7fc5c5
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744 github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3 github.com/wvanbergen/kafka 1a8639a45164fcc245d5c7b4bd3ccfbd1a0ffbf3
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8 github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8

View File

@ -9,6 +9,12 @@ To generate a file with specific inputs and outputs, you can use the
-input-filter and -output-filter flags: -input-filter and -output-filter flags:
`telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka` `telegraf -sample-config -input-filter cpu:mem:net:swap -output-filter influxdb:kafka`
## Environment Variables
Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (i.e., "$STR_VAR");
for numbers and booleans they should be plain (i.e., $INT_VAR, $BOOL_VAR).
## `[global_tags]` Configuration ## `[global_tags]` Configuration
Global tags can be specified in the `[global_tags]` section of the config file in Global tags can be specified in the `[global_tags]` section of the config file in
@ -141,12 +147,12 @@ fields which begin with `time_`.
# Drop all metrics about containers for kubelet # Drop all metrics about containers for kubelet
[[inputs.prometheus]] [[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"] urls = ["http://kube-node-1:4194/metrics"]
namedrop = ["container_"] namedrop = ["container_*"]
# Only store rest client related metrics for kubelet # Only store rest client related metrics for kubelet
[[inputs.prometheus]] [[inputs.prometheus]]
urls = ["http://kube-node-1:4194/metrics"] urls = ["http://kube-node-1:4194/metrics"]
namepass = ["rest_client_"] namepass = ["rest_client_*"]
``` ```
#### Input config: prefix, suffix, and override #### Input config: prefix, suffix, and override

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +1,14 @@
package config package config
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"os"
"path/filepath" "path/filepath"
"regexp"
"sort" "sort"
"strings" "strings"
"time" "time"
@ -19,9 +22,22 @@ import (
"github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/config" "github.com/influxdata/config"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast" "github.com/influxdata/toml/ast"
) )
var (
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
)
// Config specifies the URL/user/password for the database that telegraf // Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has // will be logging to, as well as all the plugins that the user has
// specified // specified
@ -135,20 +151,28 @@ func (c *Config) ListTags() string {
} }
var header = `# Telegraf Configuration var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the # Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs. # declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active. # Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables. # To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate. # file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
# Global tags can be specified here in key="value" format. # Global tags can be specified here in key="value" format.
[global_tags] [global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1 # dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a" # rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent # Configuration for telegraf agent
[agent] [agent]
@ -188,32 +212,107 @@ var header = `# Telegraf Configuration
omit_hostname = false omit_hostname = false
# ###############################################################################
# OUTPUTS: # OUTPUT PLUGINS #
# ###############################################################################
` `
var pluginHeader = ` var inputHeader = `
#
# INPUTS: ###############################################################################
# # INPUT PLUGINS #
###############################################################################
` `
var serviceInputHeader = ` var serviceInputHeader = `
#
# SERVICE INPUTS: ###############################################################################
# # SERVICE INPUT PLUGINS #
###############################################################################
` `
// PrintSampleConfig prints the sample config // PrintSampleConfig prints the sample config
func PrintSampleConfig(pluginFilters []string, outputFilters []string) { func PrintSampleConfig(inputFilters []string, outputFilters []string) {
fmt.Printf(header) fmt.Printf(header)
if len(outputFilters) != 0 {
printFilteredOutputs(outputFilters, false)
} else {
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
fmt.Printf(inputHeader)
if len(inputFilters) != 0 {
printFilteredInputs(inputFilters, false)
} else {
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs // Filter outputs
var onames []string var onames []string
for oname := range outputs.Outputs { for oname := range outputs.Outputs {
if len(outputFilters) == 0 || sliceContains(oname, outputFilters) { if sliceContains(oname, outputFilters) {
onames = append(onames, oname) onames = append(onames, oname)
} }
} }
@ -223,38 +322,7 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
for _, oname := range onames { for _, oname := range onames {
creator := outputs.Outputs[oname] creator := outputs.Outputs[oname]
output := creator() output := creator()
printConfig(oname, output, "outputs") printConfig(oname, output, "outputs", commented)
}
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if len(pluginFilters) == 0 || sliceContains(pname, pluginFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Inputs
fmt.Printf(pluginHeader)
servInputs := make(map[string]telegraf.ServiceInput)
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
continue
}
printConfig(pname, input, "inputs")
}
// Print Service Inputs
fmt.Printf(serviceInputHeader)
for name, input := range servInputs {
printConfig(name, input, "inputs")
} }
} }
@ -263,13 +331,26 @@ type printer interface {
SampleConfig() string SampleConfig() string
} }
func printConfig(name string, p printer, op string) { func printConfig(name string, p printer, op string, commented bool) {
fmt.Printf("\n# %s\n[[%s.%s]]", p.Description(), op, name) comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig() config := p.SampleConfig()
if config == "" { if config == "" {
fmt.Printf("\n # no configuration\n") fmt.Printf("\n%s # no configuration\n\n", comment)
} else { } else {
fmt.Printf(config) lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(comment + line + "\n")
}
} }
} }
@ -285,7 +366,7 @@ func sliceContains(name string, list []string) bool {
// PrintInputConfig prints the config usage of a single input. // PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error { func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok { if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs") printConfig(name, creator(), "inputs", false)
} else { } else {
return errors.New(fmt.Sprintf("Input %s not found", name)) return errors.New(fmt.Sprintf("Input %s not found", name))
} }
@ -295,7 +376,7 @@ func PrintInputConfig(name string) error {
// PrintOutputConfig prints the config usage of a single output. // PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error { func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok { if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs") printConfig(name, creator(), "outputs", false)
} else { } else {
return errors.New(fmt.Sprintf("Output %s not found", name)) return errors.New(fmt.Sprintf("Output %s not found", name))
} }
@ -325,44 +406,44 @@ func (c *Config) LoadDirectory(path string) error {
// LoadConfig loads the given config file and applies it to c // LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error { func (c *Config) LoadConfig(path string) error {
tbl, err := config.ParseFile(path) tbl, err := parseFile(path)
if err != nil { if err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
for name, val := range tbl.Fields { for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table) subTable, ok := val.(*ast.Table)
if !ok { if !ok {
return errors.New("invalid configuration") return fmt.Errorf("%s: invalid configuration", path)
} }
switch name { switch name {
case "agent": case "agent":
if err = config.UnmarshalTable(subTable, c.Agent); err != nil { if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("Could not parse [agent] config\n") log.Printf("Could not parse [agent] config\n")
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
case "global_tags", "tags": case "global_tags", "tags":
if err = config.UnmarshalTable(subTable, c.Tags); err != nil { if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("Could not parse [global_tags] config\n") log.Printf("Could not parse [global_tags] config\n")
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
case "outputs": case "outputs":
for pluginName, pluginVal := range subTable.Fields { for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) { switch pluginSubTable := pluginVal.(type) {
case *ast.Table: case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil { if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
case []*ast.Table: case []*ast.Table:
for _, t := range pluginSubTable { for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil { if err = c.addOutput(pluginName, t); err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
} }
default: default:
return fmt.Errorf("Unsupported config format: %s", return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName) pluginName, path)
} }
} }
case "inputs", "plugins": case "inputs", "plugins":
@ -370,30 +451,50 @@ func (c *Config) LoadConfig(path string) error {
switch pluginSubTable := pluginVal.(type) { switch pluginSubTable := pluginVal.(type) {
case *ast.Table: case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil { if err = c.addInput(pluginName, pluginSubTable); err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
case []*ast.Table: case []*ast.Table:
for _, t := range pluginSubTable { for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil { if err = c.addInput(pluginName, t); err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
} }
default: default:
return fmt.Errorf("Unsupported config format: %s", return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName) pluginName, path)
} }
} }
// Assume it's an input for legacy config file support if no other // Assume it's an input for legacy config file support if no other
// identifiers are present // identifiers are present
default: default:
if err = c.addInput(name, subTable); err != nil { if err = c.addInput(name, subTable); err != nil {
return err return fmt.Errorf("Error parsing %s, %s", path, err)
} }
} }
} }
return nil return nil
} }
// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseFile(fpath string) (*ast.Table, error) {
contents, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
if env_val != "" {
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
return toml.Parse(contents)
}
func (c *Config) addOutput(name string, table *ast.Table) error { func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil return nil

View File

@ -1,6 +1,7 @@
package config package config
import ( import (
"os"
"testing" "testing"
"time" "time"
@ -10,9 +11,52 @@ import (
"github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/memcached"
"github.com/influxdata/telegraf/plugins/inputs/procstat" "github.com/influxdata/telegraf/plugins/inputs/procstat"
"github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) {
c := NewConfig()
err := os.Setenv("MY_TEST_SERVER", "192.168.1.1")
assert.NoError(t, err)
err = os.Setenv("TEST_INTERVAL", "10s")
assert.NoError(t, err)
c.LoadConfig("./testdata/single_plugin_env_vars.toml")
memcached := inputs.Inputs["memcached"]().(*memcached.Memcached)
memcached.Servers = []string{"192.168.1.1"}
mConfig := &internal_models.InputConfig{
Name: "memcached",
Filter: internal_models.Filter{
NameDrop: []string{"metricname2"},
NamePass: []string{"metricname1"},
FieldDrop: []string{"other", "stuff"},
FieldPass: []string{"some", "strings"},
TagDrop: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []internal_models.TagFilter{
internal_models.TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
IsActive: true,
},
Interval: 10 * time.Second,
}
mConfig.Tags = make(map[string]string)
assert.Equal(t, memcached, c.Inputs[0].Input,
"Testdata did not produce a correct memcached struct.")
assert.Equal(t, mConfig, c.Inputs[0].Config,
"Testdata did not produce correct memcached metadata.")
}
func TestConfig_LoadSingleInput(t *testing.T) { func TestConfig_LoadSingleInput(t *testing.T) {
c := NewConfig() c := NewConfig()
c.LoadConfig("./testdata/single_plugin.toml") c.LoadConfig("./testdata/single_plugin.toml")

View File

@ -0,0 +1,11 @@
[[inputs.memcached]]
servers = ["$MY_TEST_SERVER"]
namepass = ["metricname1"]
namedrop = ["metricname2"]
fieldpass = ["some", "strings"]
fielddrop = ["other", "stuff"]
interval = "$TEST_INTERVAL"
[inputs.memcached.tagpass]
goodtag = ["mytag"]
[inputs.memcached.tagdrop]
badtag = ["othertag"]

View File

@ -24,9 +24,8 @@ type Disque struct {
var sampleConfig = ` var sampleConfig = `
## An array of URI to gather stats about. Specify an ip or hostname ## An array of URI to gather stats about. Specify an ip or hostname
## with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, ## with optional port and password.
## 10.0.0.1:10000, etc. ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
## If no servers are specified, then localhost is used as the host. ## If no servers are specified, then localhost is used as the host.
servers = ["localhost"] servers = ["localhost"]
` `

View File

@ -35,7 +35,8 @@ var sampleConfig = `
## Domains or subdomains to query. "."(root) is default ## Domains or subdomains to query. "."(root) is default
domains = ["."] # optional domains = ["."] # optional
## Query record type. Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default is "NS" ## Query record type. Default is "A"
## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
record_type = "A" # optional record_type = "A" # optional
## DNS server port. 53 is default ## DNS server port. 53 is default

View File

@ -22,7 +22,7 @@ const sampleConfig = `
## measurement name suffix (for separating different commands) ## measurement name suffix (for separating different commands)
name_suffix = "_mycollector" name_suffix = "_mycollector"
## Data format to consume. This can be "json", "influx", "graphite" or "nagios" ## Data format to consume.
## Each data format has its own unique set of configuration options, read ## Each data format has its own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -6,7 +6,7 @@ For example, if you have a service called _mycollector_, which has HTTP endpoint
plugin like this: plugin like this:
``` ```
[[httpjson.services]] [[inputs.httpjson]]
name = "mycollector" name = "mycollector"
servers = [ servers = [
@ -24,7 +24,7 @@ plugin like this:
You can also specify which keys from server response should be considered tags: You can also specify which keys from server response should be considered tags:
``` ```
[[httpjson.services]] [[inputs.httpjson]]
... ...
tag_keys = [ tag_keys = [
@ -36,10 +36,10 @@ You can also specify which keys from server response should be considered tags:
You can also specify additional request parameters for the service: You can also specify additional request parameters for the service:
``` ```
[[httpjson.services]] [[inputs.httpjson]]
... ...
[httpjson.services.parameters] [inputs.httpjson.parameters]
event_type = "cpu_spike" event_type = "cpu_spike"
threshold = "0.75" threshold = "0.75"
@ -48,10 +48,10 @@ You can also specify additional request parameters for the service:
You can also specify additional request header parameters for the service: You can also specify additional request header parameters for the service:
``` ```
[[httpjson.services]] [[inputs.httpjson]]
... ...
[httpjson.services.headers] [inputs.httpjson.headers]
X-Auth-Token = "my-xauth-token" X-Auth-Token = "my-xauth-token"
apiVersion = "v1" apiVersion = "v1"
``` ```
@ -61,17 +61,13 @@ You can also specify additional request header parameters for the service:
Let's say that we have a service named "mycollector" configured like this: Let's say that we have a service named "mycollector" configured like this:
``` ```
[httpjson] [[inputs.httpjson]]
[[httpjson.services]]
name = "mycollector" name = "mycollector"
servers = [ servers = [
"http://my.service.com/_stats" "http://my.service.com/_stats"
] ]
# HTTP method to use (case-sensitive) # HTTP method to use (case-sensitive)
method = "GET" method = "GET"
tag_keys = ["service"] tag_keys = ["service"]
``` ```
@ -102,24 +98,19 @@ There is also the option to collect JSON from multiple services, here is an
example doing that. example doing that.
``` ```
[httpjson] [[inputs.httpjson]]
[[httpjson.services]]
name = "mycollector1" name = "mycollector1"
servers = [ servers = [
"http://my.service1.com/_stats" "http://my.service1.com/_stats"
] ]
# HTTP method to use (case-sensitive) # HTTP method to use (case-sensitive)
method = "GET" method = "GET"
[[httpjson.services]] [[inputs.httpjson]]
name = "mycollector2" name = "mycollector2"
servers = [ servers = [
"http://service.net/json/stats" "http://service.net/json/stats"
] ]
# HTTP method to use (case-sensitive) # HTTP method to use (case-sensitive)
method = "POST" method = "POST"
``` ```

View File

@ -56,7 +56,7 @@ var sampleConfig = `
## Offset (must be either "oldest" or "newest") ## Offset (must be either "oldest" or "newest")
offset = "oldest" offset = "oldest"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -34,7 +34,16 @@ var sampleConfig = `
# A list of Mesos masters, default value is localhost:5050. # A list of Mesos masters, default value is localhost:5050.
masters = ["localhost:5050"] masters = ["localhost:5050"]
# Metrics groups to be collected, by default, all enabled. # Metrics groups to be collected, by default, all enabled.
master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] master_collections = [
"resources",
"master",
"system",
"slaves",
"frameworks",
"messages",
"evqueue",
"registrar",
]
` `
// SampleConfig returns a sample configuration block // SampleConfig returns a sample configuration block

View File

@ -11,7 +11,7 @@ import (
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" "github.com/eclipse/paho.mqtt.golang"
) )
type MQTTConsumer struct { type MQTTConsumer struct {
@ -39,7 +39,7 @@ type MQTTConsumer struct {
InsecureSkipVerify bool InsecureSkipVerify bool
sync.Mutex sync.Mutex
client *mqtt.Client client mqtt.Client
// channel of all incoming raw mqtt messages // channel of all incoming raw mqtt messages
in chan mqtt.Message in chan mqtt.Message
done chan struct{} done chan struct{}
@ -78,7 +78,7 @@ var sampleConfig = `
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
@ -163,7 +163,7 @@ func (m *MQTTConsumer) receiver() {
} }
} }
func (m *MQTTConsumer) recvMessage(_ *mqtt.Client, msg mqtt.Message) { func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) {
m.in <- msg m.in <- msg
} }

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" "github.com/eclipse/paho.mqtt.golang"
) )
const ( const (

View File

@ -55,7 +55,7 @@ var sampleConfig = `
## name a queue group ## name a queue group
queue_group = "telegraf_consumers" queue_group = "telegraf_consumers"
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -18,6 +18,7 @@
- reading - reading
- requests - requests
- waiting - waiting
- writing
### Tags: ### Tags:

View File

@ -122,6 +122,11 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error {
fcgiIp := socketAddr[0] fcgiIp := socketAddr[0]
fcgiPort, _ := strconv.Atoi(socketAddr[1]) fcgiPort, _ := strconv.Atoi(socketAddr[1])
fcgi, err = newFcgiClient(fcgiIp, fcgiPort) fcgi, err = newFcgiClient(fcgiIp, fcgiPort)
if len(u.Path) > 1 {
statusPath = strings.Trim(u.Path, "/")
} else {
statusPath = "status"
}
} else { } else {
socketAddr := strings.Split(addr, ":") socketAddr := strings.Split(addr, ":")
if len(socketAddr) >= 2 { if len(socketAddr) >= 2 {

View File

@ -26,7 +26,8 @@ var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_rese
var sampleConfig = ` var sampleConfig = `
## specify address via a url matching: ## specify address via a url matching:
## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] ## postgres://[pqgotest[:password]]@localhost[/dbname]\
## ?sslmode=[disable|verify-ca|verify-full]
## or a simple string: ## or a simple string:
## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
## ##

View File

@ -38,38 +38,41 @@ type query []struct {
var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true}
var sampleConfig = ` var sampleConfig = `
# specify address via a url matching: ## specify address via a url matching:
# postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# or a simple string: ## ?sslmode=[disable|verify-ca|verify-full]
# host=localhost user=pqotest password=... sslmode=... dbname=app_production ## or a simple string:
## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# All connection parameters are optional. # ## All connection parameters are optional. #
# Without the dbname parameter, the driver will default to a database ## Without the dbname parameter, the driver will default to a database
# with the same name as the user. This dbname is just for instantiating a ## with the same name as the user. This dbname is just for instantiating a
# connection with the server and doesn't restrict the databases we are trying ## connection with the server and doesn't restrict the databases we are trying
# to grab metrics for. ## to grab metrics for.
# #
address = "host=localhost user=postgres sslmode=disable" address = "host=localhost user=postgres sslmode=disable"
# A list of databases to pull metrics about. If not specified, metrics for all ## A list of databases to pull metrics about. If not specified, metrics for all
# databases are gathered. ## databases are gathered.
# databases = ["app_production", "testing"] ## databases = ["app_production", "testing"]
# #
# Define the toml config where the sql queries are stored ## Define the toml config where the sql queries are stored
# New queries can be added, if the withdbname is set to true and there is no databases defined ## New queries can be added, if the withdbname is set to true and there is no
# in the 'databases field', the sql query is ended by a 'is not null' in order to make the query ## databases defined in the 'databases field', the sql query is ended by a
# succeed. ## 'is not null' in order to make the query succeed.
# Example : ## Example :
# The sqlquery : "SELECT * FROM pg_stat_database where datname" become "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
# because the databases variable was set to ['postgres', 'pgbench' ] and the withdbname was true. ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# Be careful that if the withdbname is set to false you d'ont have to define the where clause (aka with the dbname) ## because the databases variable was set to ['postgres', 'pgbench' ] and the
# the tagvalue field is used to define custom tags (separated by comas) ## withdbname was true. Be careful that if the withdbname is set to false you
## don't have to define the where clause (aka with the dbname) the tagvalue
## field is used to define custom tags (separated by comas)
# #
# Structure : ## Structure :
# [[inputs.postgresql_extensible.query]] ## [[inputs.postgresql_extensible.query]]
# sqlquery string ## sqlquery string
# version string ## version string
# withdbname boolean ## withdbname boolean
# tagvalue string (coma separated) ## tagvalue string (coma separated)
[[inputs.postgresql_extensible.query]] [[inputs.postgresql_extensible.query]]
sqlquery="SELECT * FROM pg_stat_database" sqlquery="SELECT * FROM pg_stat_database"
version=901 version=901

View File

@ -26,9 +26,9 @@ var sampleConfig = `
## An array of urls to scrape metrics from. ## An array of urls to scrape metrics from.
urls = ["http://localhost:9100/metrics"] urls = ["http://localhost:9100/metrics"]
### Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
### Use bearer token for authorization ## Use bearer token for authorization
# bearer_token = /path/to/bearer/token # bearer_token = /path/to/bearer/token
` `

View File

@ -178,7 +178,6 @@ var sampleConfig = `
max_repetition = 127 max_repetition = 127
oid = "ifOutOctets" oid = "ifOutOctets"
[[inputs.snmp.host]] [[inputs.snmp.host]]
address = "192.168.2.13:161" address = "192.168.2.13:161"
#address = "127.0.0.1:161" #address = "127.0.0.1:161"
@ -221,8 +220,6 @@ var sampleConfig = `
# if empty get all subtables # if empty get all subtables
# sub_tables could be not "real subtables" # sub_tables could be not "real subtables"
sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
` `
// SampleConfig returns sample configuration message // SampleConfig returns sample configuration message

View File

@ -53,7 +53,7 @@ const sampleConfig = `
## Maximum number of concurrent TCP connections to allow ## Maximum number of concurrent TCP connections to allow
max_tcp_connections = 250 max_tcp_connections = 250
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -48,7 +48,7 @@ const sampleConfig = `
## usually 1500 bytes, but can be as large as 65,535 bytes. ## usually 1500 bytes, but can be as large as 65,535 bytes.
udp_packet_size = 1500 udp_packet_size = 1500
## Data format to consume. This can be "json", "influx" or "graphite" ## Data format to consume.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

View File

@ -89,7 +89,7 @@ var sampleConfig = `
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

View File

@ -23,7 +23,7 @@ var sampleConfig = `
## Files to write to, "stdout" is a specially handled file. ## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"] files = ["stdout", "/tmp/metrics.out"]
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

View File

@ -19,6 +19,12 @@ type Kafka struct {
Topic string Topic string
// Routing Key Tag // Routing Key Tag
RoutingTag string `toml:"routing_tag"` RoutingTag string `toml:"routing_tag"`
// Compression Codec Tag
CompressionCodec int
// RequiredAcks Tag
RequiredAcks int
// MaxRetry Tag
MaxRetry int
// Legacy SSL config options // Legacy SSL config options
// TLS client certificate // TLS client certificate
@ -53,6 +59,32 @@ var sampleConfig = `
## ie, if this tag exists, it's value will be used as the routing key ## ie, if this tag exists, it's value will be used as the routing key
routing_tag = "host" routing_tag = "host"
## CompressionCodec represents the various compression codecs recognized by
## Kafka in messages.
## 0 : No compression
## 1 : Gzip compression
## 2 : Snappy compression
compression_codec = 0
## RequiredAcks is used in Produce Requests to tell the broker how many
## replica acknowledgements it must see before responding
## 0 : the producer never waits for an acknowledgement from the broker.
## This option provides the lowest latency but the weakest durability
## guarantees (some data will be lost when a server fails).
## 1 : the producer gets an acknowledgement after the leader replica has
## received the data. This option provides better durability as the
## client waits until the server acknowledges the request as successful
## (only messages that were written to the now-dead leader but not yet
## replicated will be lost).
## -1: the producer gets an acknowledgement after all in-sync replicas have
## received the data. This option provides the best durability, we
## guarantee that no messages will be lost as long as at least one in
## sync replica remains.
required_acks = -1
## The total number of times to retry sending a message
max_retry = 3
## Optional SSL Config ## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem" # ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem" # ssl_cert = "/etc/telegraf/cert.pem"
@ -60,7 +92,7 @@ var sampleConfig = `
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@ -73,10 +105,10 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) {
func (k *Kafka) Connect() error { func (k *Kafka) Connect() error {
config := sarama.NewConfig() config := sarama.NewConfig()
// Wait for all in-sync replicas to ack the message
config.Producer.RequiredAcks = sarama.WaitForAll config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
// Retry up to 10 times to produce the message config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
config.Producer.Retry.Max = 10 config.Producer.Retry.Max = k.MaxRetry
// Legacy support ssl config // Legacy support ssl config
if k.Certificate != "" { if k.Certificate != "" {

View File

@ -10,7 +10,7 @@ import (
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers"
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" paho "github.com/eclipse/paho.mqtt.golang"
) )
var sampleConfig = ` var sampleConfig = `
@ -32,7 +32,7 @@ var sampleConfig = `
## Use SSL but skip chain & host verification ## Use SSL but skip chain & host verification
# insecure_skip_verify = false # insecure_skip_verify = false
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@ -57,7 +57,7 @@ type MQTT struct {
// Use SSL but skip chain & host verification // Use SSL but skip chain & host verification
InsecureSkipVerify bool InsecureSkipVerify bool
client *paho.Client client paho.Client
opts *paho.ClientOptions opts *paho.ClientOptions
serializer serializers.Serializer serializer serializers.Serializer

View File

@ -24,7 +24,7 @@ var sampleConfig = `
## NSQ topic for producer messages ## NSQ topic for producer messages
topic = "telegraf" topic = "telegraf"
## Data format to output. This can be "influx" or "graphite" ## Data format to output.
## Each data format has it's own unique set of configuration options, read ## Each data format has it's own unique set of configuration options, read
## more about them here: ## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md

View File

@ -64,7 +64,7 @@ elif [[ -f /etc/debian_version ]]; then
which systemctl &>/dev/null which systemctl &>/dev/null
if [[ $? -eq 0 ]]; then if [[ $? -eq 0 ]]; then
install_systemd install_systemd
deb-systemd-invoke restart telegraf.service systemctl restart telegraf
else else
# Assuming sysv # Assuming sysv
install_init install_init