Merge remote-tracking branch 'upstream/master'

Roman Statsevich 2015-10-27 17:33:28 +03:00
commit c80b085ba4
108 changed files with 4094 additions and 1688 deletions


@ -2,6 +2,19 @@
### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
Telegraf how often to flush data to InfluxDB and other output sinks. For example,
users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
collect data every 2 seconds, and flush every 60 seconds.
- `precision` and `utc` are no longer valid agent config values. `precision` has
moved to the `influxdb` output config, where it will continue to default to "s"
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
even interval. This means that `interval="10s"` will collect every :00, :10, etc.
To ease scale concerns, flushing will be "jittered" by a random amount so that
all Telegraf instances do not flush at the same time. Both of these options can
be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice
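Taken together, the new agent options compose like this; a minimal sketch of the `[agent]` section (key names as described in these notes, surrounding config omitted):

```toml
[agent]
  # Collect every 10s, aligned to :00, :10, :20, ... (round_interval).
  interval = "10s"
  round_interval = true
  # Flush once a minute, with up to 5s of random jitter so that many
  # Telegraf instances do not all flush at the same moment.
  flush_interval = "60s"
  flush_jitter = "5s"
```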
### Features
- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
@ -18,6 +31,14 @@ of metrics collected and from how many plugins.
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes
- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
@ -25,6 +46,9 @@ of metrics collected and from how many plugins.
- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
## v0.1.9 [2015-09-22]


@ -36,11 +36,14 @@ type Plugin interface {
}

type Accumulator interface {
-	Add(measurement string, value interface{}, tags map[string]string)
-	AddFieldsWithTime(measurement string,
-		values map[string]interface{},
-		tags map[string]string,
-		timestamp time.Time)
+	Add(measurement string,
+		value interface{},
+		tags map[string]string,
+		timestamp ...time.Time)
+	AddFields(measurement string,
+		fields map[string]interface{},
+		tags map[string]string,
+		timestamp ...time.Time)
}
```
@ -81,8 +84,8 @@ func Gather(acc plugins.Accumulator) error {
"pid": fmt.Sprintf("%d", process.Pid), "pid": fmt.Sprintf("%d", process.Pid),
} }
acc.Add("cpu", process.CPUTime, tags) acc.Add("cpu", process.CPUTime, tags, time.Now())
acc.Add("memory", process.MemoryBytes, tags) acc.Add("memory", process.MemoryBytes, tags, time.Now())
} }
} }
``` ```
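With the `AddFields` method added to the interface above, the two separate `Add` calls can also be collapsed into a single point carrying both fields. A sketch under the same hypothetical process loop (measurement name "process" is illustrative, not from the README):

```go
tags := map[string]string{
	"pid": fmt.Sprintf("%d", process.Pid),
}
fields := map[string]interface{}{
	"cpu":    process.CPUTime,
	"memory": process.MemoryBytes,
}
// One measurement, two fields; the timestamp is variadic and may be omitted.
acc.AddFields("process", fields, tags, time.Now())
```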
@ -179,7 +182,7 @@ type Output interface {
	Close() error
	Description() string
	SampleConfig() string
-	Write(client.BatchPoints) error
+	Write(points []*client.Point) error
}
```
@ -214,8 +217,8 @@ func (s *Simple) Close() error {
	return nil
}

-func (s *Simple) Write(bp client.BatchPoints) error {
-	for _, pt := range bp {
+func (s *Simple) Write(points []*client.Point) error {
+	for _, pt := range points {
		// write `pt` to the output sink here
	}
	return nil
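Since `Write` now receives `[]*client.Point`, an output can read each point through the accessor methods added to the v2 client later in this diff. A minimal sketch (assuming the standard `log` package is imported):

```go
func (s *Simple) Write(points []*client.Point) error {
	for _, pt := range points {
		// Inspect the point via the new read accessors.
		log.Printf("measurement=%s tags=%v fields=%v time=%s",
			pt.Name(), pt.Tags(), pt.Fields(), pt.Time())
	}
	return nil
}
```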

Godeps/Godeps.json (generated)

@ -102,8 +102,8 @@
	},
	{
		"ImportPath": "github.com/influxdb/influxdb",
-		"Comment": "v0.9.4-rc1-652-gd9f0413",
-		"Rev": "d9f04132ef567bb9671690e4db226ff3dab9feb5"
+		"Comment": "v0.9.4-rc1-703-g956efae",
+		"Rev": "956efaeb94ee57ecd8dc23e2f654b5231204e28f"
	},
	{
		"ImportPath": "github.com/lib/pq",
@ -218,6 +218,11 @@
"Comment": "v1.0-21-gf552045", "Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
}, },
{
"ImportPath": "github.com/stretchr/testify/suite",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
	{
		"ImportPath": "github.com/wvanbergen/kafka/consumergroup",
		"Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d"


@ -16,11 +16,11 @@
- [#4310](https://github.com/influxdb/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou
- [#4348](https://github.com/influxdb/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.
- [#4178](https://github.com/influxdb/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert!
- [#4291](https://github.com/influxdb/influxdb/pull/4291): Added ALTER DATABASE RENAME. Thanks @linearb
- [#4409](https://github.com/influxdb/influxdb/pull/4409): wire up INTO queries.
- [#4379](https://github.com/influxdb/influxdb/pull/4379): Auto-create database for UDP input.
- [#4375](https://github.com/influxdb/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.
-- [#4459](https://github.com/influxdb/influxdb/pull/4459): Register with Enterprise service if token available.
+- [#4506](https://github.com/influxdb/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available.
- [#4501](https://github.com/influxdb/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.
### Bugfixes
- [#4389](https://github.com/influxdb/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.
@ -50,6 +50,7 @@
- [#4465](https://github.com/influxdb/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.
- [#4342](https://github.com/influxdb/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.
- [#4349](https://github.com/influxdb/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.
- [#4502](https://github.com/influxdb/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib
- [#4354](https://github.com/influxdb/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters.
- [#4357](https://github.com/influxdb/influxdb/issues/4357): Fix similar float values encoding overflow. Thanks @dgryski!
- [#4344](https://github.com/influxdb/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
@ -71,6 +72,9 @@
- [#4415](https://github.com/influxdb/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
- [#4472](https://github.com/influxdb/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
- [#4475](https://github.com/influxdb/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
- [#4486](https://github.com/influxdb/influxdb/pull/4486): Fix missing comments for runner package
- [#4497](https://github.com/influxdb/influxdb/pull/4497): Fix sequence in meta proto
- [#3367](https://github.com/influxdb/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
## v0.9.4 [2015-09-14]


@ -66,7 +66,7 @@ To assist in review for the PR, please add the following to your pull request co
Use of third-party packages
------------
A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.

For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
@ -236,7 +236,7 @@ Note that when you pass the binary to `go tool pprof` *you must specify the path
Continuous Integration testing
-----
InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.

Useful links
------------


@ -168,7 +168,7 @@ func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
		}
		res = response.Results
	}
-	return response, nil
+	return res, nil
}
```
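A usage sketch for the corrected helper, assuming `clnt` is an open `client.Client` and the database holds a hypothetical `cpu_usage` measurement:

```go
res, err := queryDB(clnt, "SELECT count(value) FROM cpu_usage")
if err != nil {
	log.Fatal(err)
}
// res[0].Series[0].Values holds the rows returned by the query.
log.Printf("count: %v", res[0].Series[0].Values[0][1])
```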


@ -19,7 +19,7 @@ func ExampleNewClient() {
	}

	// NOTE: this assumes you've setup a user and have setup shell env variables,
	// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.
	conf := client.Config{
		URL:      *host,
		Username: os.Getenv("INFLUX_USER"),


@ -218,6 +218,31 @@ func (p *Point) PrecisionString(precison string) string {
	return p.pt.PrecisionString(precison)
}
// Name returns the measurement name of the point
func (p *Point) Name() string {
return p.pt.Name()
}
// Tags returns the tags associated with the point
func (p *Point) Tags() map[string]string {
return p.pt.Tags()
}
// Time returns the timestamp for the point
func (p *Point) Time() time.Time {
return p.pt.Time()
}
// UnixNano returns the unix nano time of the point
func (p *Point) UnixNano() int64 {
return p.pt.UnixNano()
}
// Fields returns the fields for the point
func (p *Point) Fields() map[string]interface{} {
return p.pt.Fields()
}
func (c *client) Write(bp BatchPoints) error {
	u := c.url
	u.Path = "write"


@ -5,6 +5,7 @@ import (
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
"reflect"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -186,6 +187,54 @@ func TestClient_PointWithoutTimeString(t *testing.T) {
	}
}
func TestClient_PointName(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
exp := "cpu_usage"
if p.Name() != exp {
t.Errorf("Error, got %s, expected %s",
p.Name(), exp)
}
}
func TestClient_PointTags(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
if !reflect.DeepEqual(tags, p.Tags()) {
t.Errorf("Error, got %v, expected %v",
p.Tags(), tags)
}
}
func TestClient_PointUnixNano(t *testing.T) {
const shortForm = "2006-Jan-02"
time1, _ := time.Parse(shortForm, "2013-Feb-03")
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields, time1)
exp := int64(1359849600000000000)
if p.UnixNano() != exp {
t.Errorf("Error, got %d, expected %d",
p.UnixNano(), exp)
}
}
func TestClient_PointFields(t *testing.T) {
tags := map[string]string{"cpu": "cpu-total"}
fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
p := NewPoint("cpu_usage", tags, fields)
if !reflect.DeepEqual(fields, p.Fields()) {
t.Errorf("Error, got %v, expected %v",
p.Fields(), fields)
}
}
func TestBatchPoints_PrecisionError(t *testing.T) {
	_, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"})
	if err == nil {


@ -15,7 +15,7 @@ func ExampleNewClient() client.Client {
	u, _ := url.Parse("http://localhost:8086")

	// NOTE: this assumes you've setup a user and have setup shell env variables,
	// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.
	client := client.NewClient(client.Config{
		URL:      u,
		Username: os.Getenv("INFLUX_USER"),


@ -323,7 +323,7 @@ func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPo
	// If the write consistency level is ANY, then a successful hinted handoff can
	// be considered a successful write so send nil to the response channel
	// otherwise, let the original error propagate to the response channel
	if hherr == nil && consistency == ConsistencyLevelAny {
		ch <- &AsyncWriteResult{owner, nil}
		return


@ -228,9 +228,9 @@ func TestPointsWriter_WritePoints(t *testing.T) {
		expErr: nil,
	},

	// Write to non-existent database
	{
		name:            "write to non-existent database",
		database:        "doesnt_exist",
		retentionPolicy: "",
		consistency:     cluster.ConsistencyLevelAny,


@ -165,11 +165,11 @@ func readIds(path string) (map[string]uint64, error) {
	}
	return ids, err
}

-func readIndex(f *os.File) *tsmIndex {
+func readIndex(f *os.File) (*tsmIndex, error) {
	// Get the file size
	stat, err := f.Stat()
	if err != nil {
-		panic(err.Error())
+		return nil, err
	}

	// Seek to the series count
@ -177,8 +177,7 @@ func readIndex(f *os.File) *tsmIndex {
	b := make([]byte, 8)
	_, err = f.Read(b[:4])
	if err != nil {
-		fmt.Printf("error: %v\n", err.Error())
-		os.Exit(1)
+		return nil, err
	}

	seriesCount := binary.BigEndian.Uint32(b)
@ -206,6 +205,10 @@ func readIndex(f *os.File) *tsmIndex {
		series: count,
	}
if indexStart < 0 {
return nil, fmt.Errorf("index corrupt: offset=%d", indexStart)
}
	// Read the index entries
	for i := 0; i < count; i++ {
		f.Read(b)
@ -215,7 +218,7 @@ func readIndex(f *os.File) *tsmIndex {
		index.blocks = append(index.blocks, &block{id: id, offset: int64(pos)})
	}

-	return index
+	return index, nil
}

func cmdDumpTsm1(opts *tsdmDumpOpts) {
@ -254,7 +257,19 @@ func cmdDumpTsm1(opts *tsdmDumpOpts) {
		invIds[v] = k
	}

-	index := readIndex(f)
+	index, err := readIndex(f)
if err != nil {
println("Failed to readIndex:", err.Error())
// Create a stubbed out index so we can still try and read the block data directly
// w/o panicking ourselves.
index = &tsmIndex{
minTime: time.Unix(0, 0),
maxTime: time.Unix(0, 0),
offset: stat.Size(),
}
}
	blockStats := &blockStats{}

	println("Summary:")


@ -22,20 +22,20 @@ import (
"github.com/influxdb/influxdb/services/httpd" "github.com/influxdb/influxdb/services/httpd"
"github.com/influxdb/influxdb/services/opentsdb" "github.com/influxdb/influxdb/services/opentsdb"
"github.com/influxdb/influxdb/services/precreator" "github.com/influxdb/influxdb/services/precreator"
"github.com/influxdb/influxdb/services/registration"
"github.com/influxdb/influxdb/services/retention" "github.com/influxdb/influxdb/services/retention"
"github.com/influxdb/influxdb/services/subscriber" "github.com/influxdb/influxdb/services/subscriber"
"github.com/influxdb/influxdb/services/udp" "github.com/influxdb/influxdb/services/udp"
"github.com/influxdb/influxdb/tsdb" "github.com/influxdb/influxdb/tsdb"
) )
const DefaultEnterpriseURL = "https://enterprise.influxdata.com"
// Config represents the configuration format for the influxd binary.
type Config struct {
	Meta         *meta.Config        `toml:"meta"`
	Data         tsdb.Config         `toml:"data"`
	Cluster      cluster.Config      `toml:"cluster"`
	Retention    retention.Config    `toml:"retention"`
	Registration registration.Config `toml:"registration"`
	Precreator   precreator.Config   `toml:"shard-precreation"`

	Admin admin.Config `toml:"admin"`
@ -54,19 +54,15 @@ type Config struct {
	// Server reporting
	ReportingDisabled bool `toml:"reporting-disabled"`
// Server registration
EnterpriseURL string `toml:"enterprise-url"`
EnterpriseToken string `toml:"enterprise-token"`
}

// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
	c := &Config{}
c.EnterpriseURL = DefaultEnterpriseURL
	c.Meta = meta.NewConfig()
	c.Data = tsdb.NewConfig()
	c.Cluster = cluster.NewConfig()
	c.Registration = registration.NewConfig()
	c.Precreator = precreator.NewConfig()

	c.Admin = admin.NewConfig()


@ -13,8 +13,6 @@ func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if _, err := toml.Decode(`
enterprise-token = "deadbeef"

[meta]
dir = "/tmp/meta"
@ -57,9 +55,7 @@ enabled = true
	}

	// Validate configuration.
-	if c.EnterpriseToken != "deadbeef" {
-		t.Fatalf("unexpected Enterprise token: %s", c.EnterpriseToken)
-	} else if c.Meta.Dir != "/tmp/meta" {
+	if c.Meta.Dir != "/tmp/meta" {
		t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
	} else if c.Data.Dir != "/tmp/data" {
		t.Fatalf("unexpected data dir: %s", c.Data.Dir)
@ -91,8 +87,6 @@ func TestConfig_Parse_EnvOverride(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if _, err := toml.Decode(`
enterprise-token = "deadbeef"

[meta]
dir = "/tmp/meta"
@ -131,10 +125,6 @@ enabled = true
		t.Fatal(err)
	}
if err := os.Setenv("INFLUXDB_ENTERPRISE_TOKEN", "wheresthebeef"); err != nil {
t.Fatalf("failed to set env var: %v", err)
}
if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil { if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
t.Fatalf("failed to set env var: %v", err) t.Fatalf("failed to set env var: %v", err)
} }
@ -147,10 +137,6 @@ enabled = true
t.Fatalf("failed to apply env overrides: %v", err) t.Fatalf("failed to apply env overrides: %v", err)
} }
if c.EnterpriseToken != "wheresthebeef" {
t.Fatalf("unexpected Enterprise token: %s", c.EnterpriseToken)
}
	if c.UDPs[0].BindAddress != ":4444" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
	}


@ -2,9 +2,7 @@ package run
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
@ -26,6 +24,7 @@ import (
"github.com/influxdb/influxdb/services/httpd" "github.com/influxdb/influxdb/services/httpd"
"github.com/influxdb/influxdb/services/opentsdb" "github.com/influxdb/influxdb/services/opentsdb"
"github.com/influxdb/influxdb/services/precreator" "github.com/influxdb/influxdb/services/precreator"
"github.com/influxdb/influxdb/services/registration"
"github.com/influxdb/influxdb/services/retention" "github.com/influxdb/influxdb/services/retention"
"github.com/influxdb/influxdb/services/snapshotter" "github.com/influxdb/influxdb/services/snapshotter"
"github.com/influxdb/influxdb/services/subscriber" "github.com/influxdb/influxdb/services/subscriber"
@ -76,8 +75,6 @@ type Server struct {
	// Server reporting and registration
	reportingDisabled bool
enterpriseURL string
enterpriseToken string
	// Profiling
	CPUProfile string
@ -104,8 +101,6 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
enterpriseURL: c.EnterpriseURL,
enterpriseToken: c.EnterpriseToken,
	}

	// Copy TSDB configuration.
@ -162,6 +157,7 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendRegistrationService(c.Registration)
	s.appendSnapshotterService()
	s.appendCopierService()
	s.appendAdminService(c.Admin)
@ -299,6 +295,21 @@ func (s *Server) appendPrecreatorService(c precreator.Config) error {
	return nil
}
func (s *Server) appendRegistrationService(c registration.Config) error {
if !c.Enabled {
return nil
}
srv, err := registration.NewService(c, s.buildInfo.Version)
if err != nil {
return err
}
srv.MetaStore = s.MetaStore
srv.Monitor = s.Monitor
s.Services = append(s.Services, srv)
return nil
}
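For reference, the service above is only appended when registration is enabled in the configuration; per the sample config later in this diff, that is the new `[registration]` block (the token value here is a placeholder):

```toml
[registration]
  enabled = true
  url = "https://enterprise.influxdata.com"  # The Enterprise server URL
  token = "<your-enterprise-token>"          # Registration token for Enterprise server
```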
func (s *Server) appendUDPService(c udp.Config) {
	if !c.Enabled {
		return
@ -403,11 +414,6 @@ func (s *Server) Open() error {
		go s.startServerReporting()
	}
// Register server
if err := s.registerServer(); err != nil {
log.Printf("failed to register server: %s", err.Error())
}
		return nil
	}(); err != nil {
@ -519,59 +525,6 @@ func (s *Server) reportServer() {
	go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data)
}
// registerServer registers the server on start-up.
func (s *Server) registerServer() error {
if s.enterpriseToken == "" {
return nil
}
clusterID, err := s.MetaStore.ClusterID()
if err != nil {
log.Printf("failed to retrieve cluster ID for registration: %s", err.Error())
return err
}
hostname, err := os.Hostname()
if err != nil {
return err
}
j := map[string]interface{}{
"cluster_id": fmt.Sprintf("%d", clusterID),
"server_id": fmt.Sprintf("%d", s.MetaStore.NodeID()),
"host": hostname,
"product": "influxdb",
"version": s.buildInfo.Version,
}
b, err := json.Marshal(j)
if err != nil {
return err
}
url := fmt.Sprintf("%s/api/v1/servers?token=%s", s.enterpriseURL, s.enterpriseToken)
go func() {
client := http.Client{Timeout: time.Duration(5 * time.Second)}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(b))
if err != nil {
log.Printf("failed to register server with %s: %s", s.enterpriseURL, err.Error())
return
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusCreated {
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("failed to read response from registration server: %s", err.Error())
return
}
log.Printf("failed to register server with %s: received code %s, body: %s", s.enterpriseURL, resp.Status, string(body))
}()
return nil
}
// monitorErrorChan reads an error channel and resends it through the server.
func (s *Server) monitorErrorChan(ch <-chan error) {
	for {


@ -66,43 +66,18 @@ func TestServer_DatabaseCommands(t *testing.T) {
			command: `SHOW DATABASES`,
			exp:     `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`,
		},
&Query{
name: "rename database should succeed",
command: `ALTER DATABASE db1 RENAME TO db2`,
exp: `{"results":[{}]}`,
},
&Query{
name: "show databases should reflect change of name",
command: `SHOW DATABASES`,
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db2"]]}]}]}`,
},
&Query{
name: "rename non-existent database should fail",
command: `ALTER DATABASE db4 RENAME TO db5`,
exp: `{"results":[{"error":"database not found"}]}`,
},
&Query{
name: "rename database to illegal name should fail",
command: `ALTER DATABASE db2 RENAME TO 0xdb0`,
exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 30"}`,
},
&Query{
name: "rename database to already existing datbase should fail",
command: `ALTER DATABASE db2 RENAME TO db0`,
exp: `{"results":[{"error":"database already exists"}]}`,
},
		&Query{
			name:    "drop database db0 should succeed",
			command: `DROP DATABASE db0`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
-			name:    "drop database db2 should succeed",
-			command: `DROP DATABASE db2`,
+			name:    "drop database db1 should succeed",
+			command: `DROP DATABASE db1`,
			exp: `{"results":[{}]}`,
		},
		&Query{
-			name:    "show databases should have no results after dropping all databases",
+			name:    "show database should have no results",
			command: `SHOW DATABASES`,
			exp:     `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`,
		},
@ -266,96 +241,6 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
	}
}
func TestServer_Query_RenameDatabase(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig(), "")
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
t.Fatal(err)
}
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
}
test := NewTest("db0", "rp0")
test.write = strings.Join(writes, "\n")
test.addQueries([]*Query{
&Query{
name: "Query data from db0 database",
command: `SELECT * FROM cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
&Query{
name: "Query data from db0 database with GROUP BY *",
command: `SELECT * FROM cpu GROUP BY *`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
&Query{
name: "Create continuous query using db0",
command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM cpu GROUP BY time(5s) END`,
exp: `{"results":[{}]}`,
},
&Query{
name: "Rename database should fail because of conflicting CQ",
command: `ALTER DATABASE db0 RENAME TO db1`,
exp: `{"results":[{"error":"database rename conflict with existing continuous query"}]}`,
},
&Query{
name: "Drop conflicting CQ",
command: `DROP CONTINUOUS QUERY "cq1" on db0`,
exp: `{"results":[{}]}`,
},
&Query{
name: "Rename database should succeed now",
command: `ALTER DATABASE db0 RENAME TO db1`,
exp: `{"results":[{}]}`,
},
&Query{
name: "Query data from db0 database and ensure it's gone",
command: `SELECT * FROM cpu`,
exp: `{"results":[{"error":"database not found: db0"}]}`,
params: url.Values{"db": []string{"db0"}},
},
&Query{
name: "Query data from now renamed database db1 and ensure that's there",
command: `SELECT * FROM cpu`,
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
params: url.Values{"db": []string{"db1"}},
},
&Query{
name: "Query data from now renamed database db1 and ensure it's still there with GROUP BY *",
command: `SELECT * FROM cpu GROUP BY *`,
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
params: url.Values{"db": []string{"db1"}},
},
}...)
for i, query := range test.queries {
if i == 0 {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
}
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
@ -4371,6 +4256,24 @@ func TestServer_Query_ShowMeasurements(t *testing.T) {
exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`,
params: url.Values{"db": []string{"db0"}}, params: url.Values{"db": []string{"db0"}},
}, },
&Query{
name: `show measurements using WITH`,
command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu",
exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
&Query{
name: `show measurements using WITH and regex`,
command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/",
exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
&Query{
name: `show measurements using WITH and regex - no matches`,
command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/",
exp: `{"results":[{}]}`,
params: url.Values{"db": []string{"db0"}},
},
		&Query{
			name:    `show measurements where tag matches regular expression`,
			command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/",
@ -5008,6 +4911,7 @@ func TestServer_Query_IntoTarget(t *testing.T) {
		fmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
		fmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
		fmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
		fmt.Sprintf(`foo value=4,foobar=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
@ -5017,14 +4921,14 @@ func TestServer_Query_IntoTarget(t *testing.T) {
		&Query{
			name:   "into",
			params: url.Values{"db": []string{"db0"}},
-			command: `SELECT value AS something INTO baz FROM foo`,
-			exp:     `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`,
+			command: `SELECT * INTO baz FROM foo`,
+			exp:     `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
		},
		&Query{
			name:   "confirm results",
			params: url.Values{"db": []string{"db0"}},
-			command: `SELECT something FROM baz`,
-			exp:     `{"results":[{"series":[{"name":"baz","columns":["time","something"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:00:10Z",2],["2000-01-01T00:00:20Z",3],["2000-01-01T00:00:30Z",4]]}]}]}`,
+			command: `SELECT * FROM baz`,
+			exp:     `{"results":[{"series":[{"name":"baz","columns":["time","foobar","value"],"values":[["2000-01-01T00:00:00Z",null,1],["2000-01-01T00:00:10Z",null,2],["2000-01-01T00:00:20Z",null,3],["2000-01-01T00:00:30Z",null,4],["2000-01-01T00:00:40Z",3,4]]}]}]}`,
		},
	}...)


@ -18,7 +18,7 @@ When each test runs it does the following:
## Idempotent - Allows for parallel tests

Each test should be `idempotent`, meaning that its data will not be affected by other tests, or use cases within the table tests themselves.
This allows for parallel testing, keeping the test suite total execution time very low.

### Basic sample test


@ -8,9 +8,14 @@
# Change this option to true to disable reporting.
reporting-disabled = false

-# Enterprise registration control
-# enterprise-url = "https://enterprise.influxdata.com" # The Enterprise server URL
-# enterprise-token = "" # Registration token for Enterprise server
+###
+### Enterprise registration control
+###
[registration]
# enabled = true
# url = "https://enterprise.influxdata.com" # The Enterprise server URL
# token = "" # Registration token for Enterprise server
###
### [meta]


@ -80,7 +80,6 @@ type Node interface {
func (*Query) node()     {}
func (Statements) node() {}

func (*AlterDatabaseRenameStatement) node()   {}
func (*AlterRetentionPolicyStatement) node()  {}
func (*CreateContinuousQueryStatement) node() {}
func (*CreateDatabaseStatement) node()        {}
@ -192,7 +191,6 @@ type ExecutionPrivilege struct {
// ExecutionPrivileges is a list of privileges required to execute a statement.
type ExecutionPrivileges []ExecutionPrivilege

func (*AlterDatabaseRenameStatement) stmt()   {}
func (*AlterRetentionPolicyStatement) stmt()  {}
func (*CreateContinuousQueryStatement) stmt() {}
func (*CreateDatabaseStatement) stmt()        {}
@ -510,30 +508,6 @@ func (s *GrantAdminStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// AlterDatabaseRenameStatement represents a command for renaming a database.
type AlterDatabaseRenameStatement struct {
// Current name of the database
OldName string
// New name of the database
NewName string
}
// String returns a string representation of the rename database statement.
func (s *AlterDatabaseRenameStatement) String() string {
var buf bytes.Buffer
_, _ = buf.WriteString("ALTER DATABASE ")
_, _ = buf.WriteString(s.OldName)
_, _ = buf.WriteString(" RENAME ")
_, _ = buf.WriteString(" TO ")
_, _ = buf.WriteString(s.NewName)
return buf.String()
}
// RequiredPrivileges returns the privilege required to execute an AlterDatabaseRenameStatement.
func (s *AlterDatabaseRenameStatement) RequiredPrivileges() ExecutionPrivileges {
return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// SetPasswordUserStatement represents a command for changing user password.
type SetPasswordUserStatement struct {
	// Plain Password
@ -1953,6 +1927,9 @@ func (s *DropContinuousQueryStatement) RequiredPrivileges() ExecutionPrivileges
// ShowMeasurementsStatement represents a command for listing measurements.
type ShowMeasurementsStatement struct {
	// Measurement name or regex.
	Source Source

	// An expression evaluated on data point.
	Condition Expr


@ -226,18 +226,14 @@ func (p *Parser) parseDropStatement() (Statement, error) {
// This function assumes the ALTER token has already been consumed.
func (p *Parser) parseAlterStatement() (Statement, error) {
	tok, pos, lit := p.scanIgnoreWhitespace()

-	switch tok {
-	case RETENTION:
+	if tok == RETENTION {
		if tok, pos, lit = p.scanIgnoreWhitespace(); tok != POLICY {
			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
		}
		return p.parseAlterRetentionPolicyStatement()
-	case DATABASE:
-		return p.parseAlterDatabaseRenameStatement()
	}

-	return nil, newParseError(tokstr(tok, lit), []string{"RETENTION", "DATABASE"}, pos)
+	return nil, newParseError(tokstr(tok, lit), []string{"RETENTION"}, pos)
}

// parseSetPasswordUserStatement parses a string and returns a set statement.
@ -1011,6 +1007,29 @@ func (p *Parser) parseShowMeasurementsStatement() (*ShowMeasurementsStatement, e
	stmt := &ShowMeasurementsStatement{}
	var err error
// Parse optional WITH clause.
if tok, _, _ := p.scanIgnoreWhitespace(); tok == WITH {
// Parse required MEASUREMENT token.
if err := p.parseTokens([]Token{MEASUREMENT}); err != nil {
return nil, err
}
// Parse required operator: = or =~.
tok, pos, lit := p.scanIgnoreWhitespace()
switch tok {
case EQ, EQREGEX:
// Parse required source (measurement name or regex).
if stmt.Source, err = p.parseSource(); err != nil {
return nil, err
}
default:
return nil, newParseError(tokstr(tok, lit), []string{"=", "=~"}, pos)
}
} else {
// Not a WITH clause so put the token back.
p.unscan()
}
	// Parse condition: "WHERE EXPR".
	if stmt.Condition, err = p.parseCondition(); err != nil {
		return nil, err
@ -1449,33 +1468,6 @@ func (p *Parser) parseDropDatabaseStatement() (*DropDatabaseStatement, error) {
	return stmt, nil
}
// parseAlterDatabaseRenameStatement parses a string and returns an AlterDatabaseRenameStatement.
// This function assumes the "ALTER DATABASE" tokens have already been consumed.
func (p *Parser) parseAlterDatabaseRenameStatement() (*AlterDatabaseRenameStatement, error) {
stmt := &AlterDatabaseRenameStatement{}
// Parse the name of the database to be renamed.
lit, err := p.parseIdent()
if err != nil {
return nil, err
}
stmt.OldName = lit
// Parse required RENAME TO tokens.
if err := p.parseTokens([]Token{RENAME, TO}); err != nil {
return nil, err
}
// Parse the new name of the database.
lit, err = p.parseIdent()
if err != nil {
return nil, err
}
stmt.NewName = lit
return stmt, nil
}
// parseDropSubscriptionStatement parses a string and returns a DropSubscriptionStatement.
// This function assumes the "DROP SUBSCRIPTION" tokens have already been consumed.
func (p *Parser) parseDropSubscriptionStatement() (*DropSubscriptionStatement, error) {
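For orientation, the new `WITH MEASUREMENT` clause parsed above accepts either an exact name (`=`) or a regex (`=~`). A sketch exercising it through the package-level parser entry point (imports assumed):

```go
stmt, err := influxql.ParseStatement(`SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/`)
if err != nil {
	log.Fatal(err)
}
show := stmt.(*influxql.ShowMeasurementsStatement)
fmt.Printf("source: %s\n", show.Source) // the parsed measurement regex
```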


@ -751,6 +751,24 @@ func TestParser_ParseStatement(t *testing.T) {
			},
		},
// SHOW MEASUREMENTS WITH MEASUREMENT = cpu
{
s: `SHOW MEASUREMENTS WITH MEASUREMENT = cpu`,
stmt: &influxql.ShowMeasurementsStatement{
Source: &influxql.Measurement{Name: "cpu"},
},
},
// SHOW MEASUREMENTS WITH MEASUREMENT =~ /regex/
{
s: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/`,
stmt: &influxql.ShowMeasurementsStatement{
Source: &influxql.Measurement{
Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},
},
},
},
		// SHOW RETENTION POLICIES
		{
			s: `SHOW RETENTION POLICIES ON mydb`,
@ -1418,12 +1436,6 @@ func TestParser_ParseStatement(t *testing.T) {
			stmt: newAlterRetentionPolicyStatement("default", "testdb", -1, 4, false),
		},
// ALTER DATABASE RENAME
{
s: `ALTER DATABASE db0 RENAME TO db1`,
stmt: newAlterDatabaseRenameStatement("db0", "db1"),
},
		// SHOW STATS
		{
			s: `SHOW STATS`,
@ -1687,15 +1699,11 @@ func TestParser_ParseStatement(t *testing.T) {
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected number at line 1, char 67`},
		{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 1 foo`, err: `found foo, expected DEFAULT at line 1, char 69`},
-		{s: `ALTER`, err: `found EOF, expected RETENTION, DATABASE at line 1, char 7`},
+		{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},
		{s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},
		{s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},
		{s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`},
		{s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},
		{s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, RETENTION, DEFAULT at line 1, char 42`},
{s: `ALTER DATABASE`, err: `found EOF, expected identifier at line 1, char 16`},
{s: `ALTER DATABASE db0`, err: `found EOF, expected RENAME at line 1, char 20`},
{s: `ALTER DATABASE db0 RENAME`, err: `found EOF, expected TO at line 1, char 27`},
{s: `ALTER DATABASE db0 RENAME TO`, err: `found EOF, expected identifier at line 1, char 30`},
		{s: `SET`, err: `found EOF, expected PASSWORD at line 1, char 5`},
		{s: `SET PASSWORD`, err: `found EOF, expected FOR at line 1, char 14`},
		{s: `SET PASSWORD something`, err: `found something, expected FOR at line 1, char 14`},
@ -2129,14 +2137,6 @@ func newAlterRetentionPolicyStatement(name string, DB string, d time.Duration, r
	return stmt
}
// newAlterDatabaseRenameStatement creates an initialized AlterDatabaseRenameStatement.
func newAlterDatabaseRenameStatement(oldName, newName string) *influxql.AlterDatabaseRenameStatement {
return &influxql.AlterDatabaseRenameStatement{
OldName: oldName,
NewName: newName,
}
}
// mustMarshalJSON encodes a value to JSON.
func mustMarshalJSON(v interface{}) []byte {
	b, err := json.Marshal(v)


@ -150,7 +150,6 @@ func TestScanner_Scan(t *testing.T) {
		{s: `QUERIES`, tok: influxql.QUERIES},
		{s: `QUERY`, tok: influxql.QUERY},
		{s: `READ`, tok: influxql.READ},
		{s: `RENAME`, tok: influxql.RENAME},
		{s: `RETENTION`, tok: influxql.RETENTION},
		{s: `REVOKE`, tok: influxql.REVOKE},
		{s: `SELECT`, tok: influxql.SELECT},


@ -107,7 +107,6 @@ const (
	QUERIES
	QUERY
	READ
	RENAME
	REPLICATION
	RETENTION
	REVOKE
@ -224,7 +223,6 @@ var tokens = [...]string{
QUERIES: "QUERIES", QUERIES: "QUERIES",
QUERY: "QUERY", QUERY: "QUERY",
READ: "READ", READ: "READ",
RENAME: "RENAME",
REPLICATION: "REPLICATION", REPLICATION: "REPLICATION",
RETENTION: "RETENTION", RETENTION: "RETENTION",
REVOKE: "REVOKE", REVOKE: "REVOKE",


@ -1,9 +1,7 @@
package meta

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/gogo/protobuf/proto"
@ -179,69 +177,6 @@ func (data *Data) DropDatabase(name string) error {
	return ErrDatabaseNotFound
}
// RenameDatabase renames a database.
// Returns an error if oldName or newName is blank
// or if a database with the newName already exists
// or if a database with oldName does not exist
func (data *Data) RenameDatabase(oldName, newName string) error {
if newName == "" || oldName == "" {
return ErrDatabaseNameRequired
}
if data.Database(newName) != nil {
return ErrDatabaseExists
}
if data.Database(oldName) == nil {
return ErrDatabaseNotFound
}
// TODO should rename database in continuous queries also
// for now, just return an error if there is a possible conflict
if data.isDatabaseNameUsedInCQ(oldName) {
return ErrDatabaseRenameCQConflict
}
// find database named oldName and rename it to newName
for i := range data.Databases {
if data.Databases[i].Name == oldName {
data.Databases[i].Name = newName
data.switchDatabaseUserPrivileges(oldName, newName)
return nil
}
}
return ErrDatabaseNotFound
}
// isDatabaseNameUsedInCQ returns true if a database name is used in any continuous query
func (data *Data) isDatabaseNameUsedInCQ(dbName string) bool {
CQOnDb := fmt.Sprintf(" ON %s ", dbName)
CQIntoDb := fmt.Sprintf(" INTO \"%s\".", dbName)
CQFromDb := fmt.Sprintf(" FROM \"%s\".", dbName)
for i := range data.Databases {
for j := range data.Databases[i].ContinuousQueries {
query := data.Databases[i].ContinuousQueries[j].Query
if strings.Contains(query, CQOnDb) {
return true
}
if strings.Contains(query, CQIntoDb) {
return true
}
if strings.Contains(query, CQFromDb) {
return true
}
}
}
return false
}
// switchDatabaseUserPrivileges changes the database associated with user privileges
func (data *Data) switchDatabaseUserPrivileges(oldDatabase, newDatabase string) error {
for i := range data.Users {
if p, ok := data.Users[i].Privileges[oldDatabase]; ok {
data.Users[i].Privileges[newDatabase] = p
delete(data.Users[i].Privileges, oldDatabase)
}
}
return nil
}
// RetentionPolicy returns a retention policy for a database by name.
func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) {
	di := data.Database(database)


@ -135,97 +135,6 @@ func TestData_DropDatabase(t *testing.T) {
	}
}
// Ensure a database can be renamed.
func TestData_RenameDatabase(t *testing.T) {
var data meta.Data
for i := 0; i < 2; i++ {
if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
t.Fatal(err)
}
}
if err := data.RenameDatabase("db1", "db2"); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}, {Name: "db2"}}) {
t.Fatalf("unexpected databases: %#v", data.Databases)
}
}
// Ensure that user privileges are updated correctly when database is renamed.
func TestData_RenameDatabaseUpdatesPrivileges(t *testing.T) {
var data meta.Data
for i := 0; i < 2; i++ {
if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
t.Fatal(err)
}
}
data.Users = []meta.UserInfo{{
Name: "susy",
Hash: "ABC123",
Admin: true,
Privileges: map[string]influxql.Privilege{
"db1": influxql.AllPrivileges, "db0": influxql.ReadPrivilege}}}
if err := data.RenameDatabase("db1", "db2"); err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(data.Users,
[]meta.UserInfo{{
Name: "susy",
Hash: "ABC123",
Admin: true,
Privileges: map[string]influxql.Privilege{
"db2": influxql.AllPrivileges, "db0": influxql.ReadPrivilege}}}) {
t.Fatalf("unexpected user privileges: %#v", data.Users)
}
}
// Ensure that renaming a database without both old and new names returns an error.
func TestData_RenameDatabase_ErrNameRequired(t *testing.T) {
var data meta.Data
if err := data.RenameDatabase("", ""); err != meta.ErrDatabaseNameRequired {
t.Fatalf("unexpected error: %s", err)
}
if err := data.RenameDatabase("from_foo", ""); err != meta.ErrDatabaseNameRequired {
t.Fatalf("unexpected error: %s", err)
}
if err := data.RenameDatabase("", "to_foo"); err != meta.ErrDatabaseNameRequired {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure that renaming a database returns an error if there is a possibly conflicting CQ
func TestData_RenameDatabase_ErrDatabaseCQConflict(t *testing.T) {
var data meta.Data
if err := data.CreateDatabase("db0"); err != nil {
t.Fatal(err)
} else if err := data.CreateDatabase("db1"); err != nil {
t.Fatal(err)
} else if err := data.CreateContinuousQuery("db0", "cq0", `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count() INTO "foo"."default"."bar" FROM "foo"."foobar" END`); err != nil {
t.Fatal(err)
} else if err := data.CreateContinuousQuery("db1", "cq1", `CREATE CONTINUOUS QUERY cq1 ON db1 BEGIN SELECT count() INTO "db1"."default"."bar" FROM "db0"."foobar" END`); err != nil {
t.Fatal(err)
} else if err := data.CreateContinuousQuery("db1", "cq2", `CREATE CONTINUOUS QUERY cq2 ON db1 BEGIN SELECT count() INTO "db0"."default"."bar" FROM "db1"."foobar" END`); err != nil {
t.Fatal(err)
} else if err := data.CreateContinuousQuery("db1", "noconflict", `CREATE CONTINUOUS QUERY noconflict ON db1 BEGIN SELECT count() INTO "db1"."default"."bar" FROM "db1"."foobar" END`); err != nil {
t.Fatal(err)
} else if err := data.RenameDatabase("db0", "db2"); err == nil {
t.Fatalf("unexpected rename database success despite cq conflict")
} else if err := data.DropContinuousQuery("db0", "cq0"); err != nil {
t.Fatal(err)
} else if err := data.RenameDatabase("db0", "db2"); err == nil {
t.Fatalf("unexpected rename database success despite cq conflict")
} else if err := data.DropContinuousQuery("db1", "cq1"); err != nil {
t.Fatal(err)
} else if err := data.RenameDatabase("db0", "db2"); err == nil {
t.Fatalf("unexpected rename database success despite cq conflict")
} else if err := data.DropContinuousQuery("db1", "cq2"); err != nil {
t.Fatal(err)
} else if err := data.RenameDatabase("db0", "db2"); err != nil {
t.Fatal(err)
}
}
// Ensure a retention policy can be created.
func TestData_CreateRetentionPolicy(t *testing.T) {
data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}, {ID: 2}}}


@ -47,9 +47,6 @@ var (
// ErrDatabaseNameRequired is returned when creating a database without a name.
ErrDatabaseNameRequired = newError("database name required")
// ErrDatabaseRenameCQConflict is returned when attempting to rename a database in use by a CQ.
ErrDatabaseRenameCQConflict = newError("database rename conflict with existing continuous query")
)
var (


@ -40,7 +40,6 @@ It has these top-level messages:
SetDataCommand
SetAdminPrivilegeCommand
UpdateNodeCommand
RenameDatabaseCommand
CreateSubscriptionCommand
DropSubscriptionCommand
Response
@ -54,12 +53,10 @@ It has these top-level messages:
package internal
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type RPCType int32
@ -120,9 +117,8 @@ const (
Command_SetDataCommand Command_Type = 17
Command_SetAdminPrivilegeCommand Command_Type = 18
Command_UpdateNodeCommand Command_Type = 19
-Command_RenameDatabaseCommand Command_Type = 20
-Command_CreateSubscriptionCommand Command_Type = 22
-Command_DropSubscriptionCommand Command_Type = 23
+Command_CreateSubscriptionCommand Command_Type = 21
+Command_DropSubscriptionCommand Command_Type = 22
)
var Command_Type_name = map[int32]string{
@ -145,9 +141,8 @@ var Command_Type_name = map[int32]string{
17: "SetDataCommand",
18: "SetAdminPrivilegeCommand",
19: "UpdateNodeCommand",
-20: "RenameDatabaseCommand",
-22: "CreateSubscriptionCommand",
-23: "DropSubscriptionCommand",
+21: "CreateSubscriptionCommand",
+22: "DropSubscriptionCommand",
}
var Command_Type_value = map[string]int32{
"CreateNodeCommand": 1,
@ -169,9 +164,8 @@ var Command_Type_value = map[string]int32{
"SetDataCommand": 17, "SetDataCommand": 17,
"SetAdminPrivilegeCommand": 18, "SetAdminPrivilegeCommand": 18,
"UpdateNodeCommand": 19, "UpdateNodeCommand": 19,
"RenameDatabaseCommand": 20, "CreateSubscriptionCommand": 21,
"CreateSubscriptionCommand": 22, "DropSubscriptionCommand": 22,
"DropSubscriptionCommand": 23,
} }
func (x Command_Type) Enum() *Command_Type {
@ -192,15 +186,15 @@ func (x *Command_Type) UnmarshalJSON(data []byte) error {
}
type Data struct {
Term *uint64 `protobuf:"varint,1,req" json:"Term,omitempty"`
Index *uint64 `protobuf:"varint,2,req" json:"Index,omitempty"`
ClusterID *uint64 `protobuf:"varint,3,req" json:"ClusterID,omitempty"`
Nodes []*NodeInfo `protobuf:"bytes,4,rep" json:"Nodes,omitempty"`
Databases []*DatabaseInfo `protobuf:"bytes,5,rep" json:"Databases,omitempty"`
Users []*UserInfo `protobuf:"bytes,6,rep" json:"Users,omitempty"`
MaxNodeID *uint64 `protobuf:"varint,7,req" json:"MaxNodeID,omitempty"`
MaxShardGroupID *uint64 `protobuf:"varint,8,req" json:"MaxShardGroupID,omitempty"`
MaxShardID *uint64 `protobuf:"varint,9,req" json:"MaxShardID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -272,8 +266,8 @@ func (m *Data) GetMaxShardID() uint64 {
}
type NodeInfo struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
Host *string `protobuf:"bytes,2,req" json:"Host,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -296,10 +290,10 @@ func (m *NodeInfo) GetHost() string {
}
type DatabaseInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
DefaultRetentionPolicy *string `protobuf:"bytes,2,req" json:"DefaultRetentionPolicy,omitempty"`
RetentionPolicies []*RetentionPolicyInfo `protobuf:"bytes,3,rep" json:"RetentionPolicies,omitempty"`
ContinuousQueries []*ContinuousQueryInfo `protobuf:"bytes,4,rep" json:"ContinuousQueries,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -336,12 +330,12 @@ func (m *DatabaseInfo) GetContinuousQueries() []*ContinuousQueryInfo {
}
type RetentionPolicyInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Duration *int64 `protobuf:"varint,2,req" json:"Duration,omitempty"`
ShardGroupDuration *int64 `protobuf:"varint,3,req" json:"ShardGroupDuration,omitempty"`
ReplicaN *uint32 `protobuf:"varint,4,req" json:"ReplicaN,omitempty"`
ShardGroups []*ShardGroupInfo `protobuf:"bytes,5,rep" json:"ShardGroups,omitempty"`
Subscriptions []*SubscriptionInfo `protobuf:"bytes,6,rep" json:"Subscriptions,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -392,11 +386,11 @@ func (m *RetentionPolicyInfo) GetSubscriptions() []*SubscriptionInfo {
}
type ShardGroupInfo struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
StartTime *int64 `protobuf:"varint,2,req" json:"StartTime,omitempty"`
EndTime *int64 `protobuf:"varint,3,req" json:"EndTime,omitempty"`
DeletedAt *int64 `protobuf:"varint,4,req" json:"DeletedAt,omitempty"`
Shards []*ShardInfo `protobuf:"bytes,5,rep" json:"Shards,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -440,9 +434,9 @@ func (m *ShardGroupInfo) GetShards() []*ShardInfo {
}
type ShardInfo struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"`
Owners []*ShardOwner `protobuf:"bytes,3,rep" json:"Owners,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -472,9 +466,9 @@ func (m *ShardInfo) GetOwners() []*ShardOwner {
}
type SubscriptionInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Mode *string `protobuf:"bytes,2,req" json:"Mode,omitempty"`
Destinations []string `protobuf:"bytes,3,rep" json:"Destinations,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -504,7 +498,7 @@ func (m *SubscriptionInfo) GetDestinations() []string {
}
type ShardOwner struct {
NodeID *uint64 `protobuf:"varint,1,req" json:"NodeID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -520,8 +514,8 @@ func (m *ShardOwner) GetNodeID() uint64 {
}
type ContinuousQueryInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -544,10 +538,10 @@ func (m *ContinuousQueryInfo) GetQuery() string {
}
type UserInfo struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Hash *string `protobuf:"bytes,2,req" json:"Hash,omitempty"`
Admin *bool `protobuf:"varint,3,req" json:"Admin,omitempty"`
Privileges []*UserPrivilege `protobuf:"bytes,4,rep" json:"Privileges,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -584,8 +578,8 @@ func (m *UserInfo) GetPrivileges() []*UserPrivilege {
}
type UserPrivilege struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Privilege *int32 `protobuf:"varint,2,req" json:"Privilege,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -639,8 +633,8 @@ func (m *Command) GetType() Command_Type {
}
type CreateNodeCommand struct {
Host *string `protobuf:"bytes,1,req" json:"Host,omitempty"`
Rand *uint64 `protobuf:"varint,2,req" json:"Rand,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -671,8 +665,8 @@ var E_CreateNodeCommand_Command = &proto.ExtensionDesc{
}
type DeleteNodeCommand struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
Force *bool `protobuf:"varint,2,req" json:"Force,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -703,7 +697,7 @@ var E_DeleteNodeCommand_Command = &proto.ExtensionDesc{
}
type CreateDatabaseCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -727,7 +721,7 @@ var E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{
}
type DropDatabaseCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -751,8 +745,8 @@ var E_DropDatabaseCommand_Command = &proto.ExtensionDesc{
}
type CreateRetentionPolicyCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
RetentionPolicy *RetentionPolicyInfo `protobuf:"bytes,2,req" json:"RetentionPolicy,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -783,8 +777,8 @@ var E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
}
type DropRetentionPolicyCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -815,8 +809,8 @@ var E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{
}
type SetDefaultRetentionPolicyCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -847,11 +841,11 @@ var E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{
}
type UpdateRetentionPolicyCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
NewName *string `protobuf:"bytes,3,opt" json:"NewName,omitempty"`
Duration *int64 `protobuf:"varint,4,opt" json:"Duration,omitempty"`
ReplicaN *uint32 `protobuf:"varint,5,opt" json:"ReplicaN,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -903,9 +897,9 @@ var E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
}
type CreateShardGroupCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Policy *string `protobuf:"bytes,2,req" json:"Policy,omitempty"`
Timestamp *int64 `protobuf:"varint,3,req" json:"Timestamp,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -943,9 +937,9 @@ var E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{
}
type DeleteShardGroupCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Policy *string `protobuf:"bytes,2,req" json:"Policy,omitempty"`
ShardGroupID *uint64 `protobuf:"varint,3,req" json:"ShardGroupID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -983,9 +977,9 @@ var E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{
}
type CreateContinuousQueryCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
Query *string `protobuf:"bytes,3,req" json:"Query,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1023,8 +1017,8 @@ var E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{
}
type DropContinuousQueryCommand struct {
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1055,9 +1049,9 @@ var E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{
}
type CreateUserCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Hash *string `protobuf:"bytes,2,req" json:"Hash,omitempty"`
Admin *bool `protobuf:"varint,3,req" json:"Admin,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1095,7 +1089,7 @@ var E_CreateUserCommand_Command = &proto.ExtensionDesc{
}
type DropUserCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1119,8 +1113,8 @@ var E_DropUserCommand_Command = &proto.ExtensionDesc{
}
type UpdateUserCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Hash *string `protobuf:"bytes,2,req" json:"Hash,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1151,9 +1145,9 @@ var E_UpdateUserCommand_Command = &proto.ExtensionDesc{
}
type SetPrivilegeCommand struct {
Username *string `protobuf:"bytes,1,req" json:"Username,omitempty"`
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
Privilege *int32 `protobuf:"varint,3,req" json:"Privilege,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1191,7 +1185,7 @@ var E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{
}
type SetDataCommand struct {
Data *Data `protobuf:"bytes,1,req" json:"Data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1215,8 +1209,8 @@ var E_SetDataCommand_Command = &proto.ExtensionDesc{
}
type SetAdminPrivilegeCommand struct {
Username *string `protobuf:"bytes,1,req" json:"Username,omitempty"`
Admin *bool `protobuf:"varint,2,req" json:"Admin,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1247,8 +1241,8 @@ var E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{
}
type UpdateNodeCommand struct {
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
Host *string `protobuf:"bytes,2,req" json:"Host,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1278,44 +1272,12 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{
Tag: "bytes,119,opt,name=command",
}
type RenameDatabaseCommand struct {
OldName *string `protobuf:"bytes,1,req,name=oldName" json:"oldName,omitempty"`
NewName *string `protobuf:"bytes,2,req,name=newName" json:"newName,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *RenameDatabaseCommand) Reset() { *m = RenameDatabaseCommand{} }
func (m *RenameDatabaseCommand) String() string { return proto.CompactTextString(m) }
func (*RenameDatabaseCommand) ProtoMessage() {}
func (m *RenameDatabaseCommand) GetOldName() string {
if m != nil && m.OldName != nil {
return *m.OldName
}
return ""
}
func (m *RenameDatabaseCommand) GetNewName() string {
if m != nil && m.NewName != nil {
return *m.NewName
}
return ""
}
var E_RenameDatabaseCommand_Command = &proto.ExtensionDesc{
ExtendedType: (*Command)(nil),
ExtensionType: (*RenameDatabaseCommand)(nil),
Field: 120,
Name: "internal.RenameDatabaseCommand.command",
Tag: "bytes,120,opt,name=command",
}
type CreateSubscriptionCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
RetentionPolicy *string `protobuf:"bytes,3,req" json:"RetentionPolicy,omitempty"`
Mode *string `protobuf:"bytes,4,req" json:"Mode,omitempty"`
Destinations []string `protobuf:"bytes,5,rep" json:"Destinations,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1367,9 +1329,9 @@ var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{
}
type DropSubscriptionCommand struct {
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
RetentionPolicy *string `protobuf:"bytes,3,req" json:"RetentionPolicy,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1407,9 +1369,9 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
}
type Response struct {
OK *bool `protobuf:"varint,1,req" json:"OK,omitempty"`
Error *string `protobuf:"bytes,2,opt" json:"Error,omitempty"`
Index *uint64 `protobuf:"varint,3,opt" json:"Index,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1439,8 +1401,8 @@ func (m *Response) GetIndex() uint64 {
}
type ResponseHeader struct {
OK *bool `protobuf:"varint,1,req" json:"OK,omitempty"`
Error *string `protobuf:"bytes,2,opt" json:"Error,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1463,7 +1425,7 @@ func (m *ResponseHeader) GetError() string {
}
type ErrorResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1479,9 +1441,9 @@ func (m *ErrorResponse) GetHeader() *ResponseHeader {
}
type FetchDataRequest struct {
Index *uint64 `protobuf:"varint,1,req" json:"Index,omitempty"`
Term *uint64 `protobuf:"varint,2,req" json:"Term,omitempty"`
Blocking *bool `protobuf:"varint,3,opt,def=0" json:"Blocking,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1513,10 +1475,10 @@ func (m *FetchDataRequest) GetBlocking() bool {
}
type FetchDataResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
Index *uint64 `protobuf:"varint,2,req" json:"Index,omitempty"`
Term *uint64 `protobuf:"varint,3,req" json:"Term,omitempty"`
Data []byte `protobuf:"bytes,4,opt" json:"Data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1553,7 +1515,7 @@ func (m *FetchDataResponse) GetData() []byte {
}
type JoinRequest struct {
Addr *string `protobuf:"bytes,1,req" json:"Addr,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1569,14 +1531,14 @@ func (m *JoinRequest) GetAddr() string {
}
type JoinResponse struct {
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
// Indicates that this node should take part in the raft cluster.
EnableRaft *bool `protobuf:"varint,2,opt" json:"EnableRaft,omitempty"`
// The addresses of raft peers to use if joining as a raft member. If not joining
// as a raft member, these are the nodes running raft.
RaftNodes []string `protobuf:"bytes,3,rep" json:"RaftNodes,omitempty"`
// The node ID assigned to the requesting node.
NodeID *uint64 `protobuf:"varint,4,opt" json:"NodeID,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
@ -1634,7 +1596,6 @@ func init() {
proto.RegisterExtension(E_SetDataCommand_Command)
proto.RegisterExtension(E_SetAdminPrivilegeCommand_Command)
proto.RegisterExtension(E_UpdateNodeCommand_Command)
proto.RegisterExtension(E_RenameDatabaseCommand_Command)
proto.RegisterExtension(E_CreateSubscriptionCommand_Command)
proto.RegisterExtension(E_DropSubscriptionCommand_Command)
}


@ -112,9 +112,8 @@ message Command {
SetDataCommand = 17;
SetAdminPrivilegeCommand = 18;
UpdateNodeCommand = 19;
-RenameDatabaseCommand = 20;
-CreateSubscriptionCommand = 22;
-DropSubscriptionCommand = 23;
+CreateSubscriptionCommand = 21;
+DropSubscriptionCommand = 22;
}
required Type type = 1;
@ -276,14 +275,6 @@ message UpdateNodeCommand {
required string Host = 2;
}
message RenameDatabaseCommand {
extend Command {
optional RenameDatabaseCommand command = 120;
}
required string oldName = 1;
required string newName = 2;
}
message CreateSubscriptionCommand {
extend Command {
optional CreateSubscriptionCommand command = 121;


@ -23,7 +23,6 @@ type StatementExecutor struct {
Databases() ([]DatabaseInfo, error)
CreateDatabase(name string) (*DatabaseInfo, error)
DropDatabase(name string) error
RenameDatabase(oldName, newName string) error
DefaultRetentionPolicy(database string) (*RetentionPolicyInfo, error)
CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) (*RetentionPolicyInfo, error)
@ -73,8 +72,6 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.
return e.executeGrantStatement(stmt)
case *influxql.GrantAdminStatement:
return e.executeGrantAdminStatement(stmt)
case *influxql.AlterDatabaseRenameStatement:
return e.executeAlterDatabaseRenameStatement(stmt)
case *influxql.RevokeStatement:
return e.executeRevokeStatement(stmt)
case *influxql.RevokeAdminStatement:
@ -224,10 +221,6 @@ func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdmin
return &influxql.Result{Err: e.Store.SetAdminPrivilege(stmt.User, true)}
}
func (e *StatementExecutor) executeAlterDatabaseRenameStatement(q *influxql.AlterDatabaseRenameStatement) *influxql.Result {
return &influxql.Result{Err: e.Store.RenameDatabase(q.OldName, q.NewName)}
}
func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) *influxql.Result {
priv := influxql.NoPrivileges


@ -46,26 +46,6 @@ func TestStatementExecutor_ExecuteStatement_DropDatabase(t *testing.T) {
}
}
// Ensure an ALTER DATABASE ... RENAME TO ... statement can be executed.
func TestStatementExecutor_ExecuteStatement_AlterDatabaseRename(t *testing.T) {
e := NewStatementExecutor()
e.Store.RenameDatabaseFn = func(oldName, newName string) error {
if oldName != "old_foo" {
t.Fatalf("unexpected name: %s", oldName)
}
if newName != "new_foo" {
t.Fatalf("unexpected name: %s", newName)
}
return nil
}
if res := e.ExecuteStatement(influxql.MustParseStatement(`ALTER DATABASE old_foo RENAME TO new_foo`)); res.Err != nil {
t.Fatal(res.Err)
} else if res.Series != nil {
t.Fatalf("unexpected rows: %#v", res.Series)
}
}
// Ensure a SHOW DATABASES statement can be executed.
func TestStatementExecutor_ExecuteStatement_ShowDatabases(t *testing.T) {
e := NewStatementExecutor()
@ -1056,7 +1036,6 @@ type StatementExecutorStore struct {
CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error)
DropDatabaseFn func(name string) error
DeleteNodeFn func(nodeID uint64, force bool) error
RenameDatabaseFn func(oldName, newName string) error
DefaultRetentionPolicyFn func(database string) (*meta.RetentionPolicyInfo, error)
CreateRetentionPolicyFn func(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate) error
@ -1116,10 +1095,6 @@ func (s *StatementExecutorStore) DropDatabase(name string) error {
return s.DropDatabaseFn(name)
}
func (s *StatementExecutorStore) RenameDatabase(oldName, newName string) error {
return s.RenameDatabaseFn(oldName, newName)
}
func (s *StatementExecutorStore) DefaultRetentionPolicy(database string) (*meta.RetentionPolicyInfo, error) {
return s.DefaultRetentionPolicyFn(database)
}


@ -88,7 +88,7 @@ type Store struct {
wg sync.WaitGroup
changed chan struct{}
// clusterTracingEnabled controls whether low-level cluster communication is logged.
// Useful for troubleshooting
clusterTracingEnabled bool
@ -927,16 +927,6 @@ func (s *Store) DropDatabase(name string) error {
)
}
// RenameDatabase renames a database in the metastore
func (s *Store) RenameDatabase(oldName, newName string) error {
return s.exec(internal.Command_RenameDatabaseCommand, internal.E_RenameDatabaseCommand_Command,
&internal.RenameDatabaseCommand{
OldName: proto.String(oldName),
NewName: proto.String(newName),
},
)
}
// RetentionPolicy returns a retention policy for a database by name.
func (s *Store) RetentionPolicy(database, name string) (rpi *RetentionPolicyInfo, err error) {
err = s.read(func(data *Data) error {
@ -1668,8 +1658,6 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
return fsm.applyCreateDatabaseCommand(&cmd)
case internal.Command_DropDatabaseCommand:
return fsm.applyDropDatabaseCommand(&cmd)
case internal.Command_RenameDatabaseCommand:
return fsm.applyRenameDatabaseCommand(&cmd)
case internal.Command_CreateRetentionPolicyCommand:
return fsm.applyCreateRetentionPolicyCommand(&cmd)
case internal.Command_DropRetentionPolicyCommand:
@ -1798,20 +1786,6 @@ func (fsm *storeFSM) applyDropDatabaseCommand(cmd *internal.Command) interface{}
return nil
}
func (fsm *storeFSM) applyRenameDatabaseCommand(cmd *internal.Command) interface{} {
ext, _ := proto.GetExtension(cmd, internal.E_RenameDatabaseCommand_Command)
v := ext.(*internal.RenameDatabaseCommand)
// Copy data and update.
other := fsm.data.Clone()
if err := other.RenameDatabase(v.GetOldName(), v.GetNewName()); err != nil {
return err
}
fsm.data = other
return nil
}
func (fsm *storeFSM) applyCreateRetentionPolicyCommand(cmd *internal.Command) interface{} {
ext, _ := proto.GetExtension(cmd, internal.E_CreateRetentionPolicyCommand_Command)
v := ext.(*internal.CreateRetentionPolicyCommand)


@ -244,76 +244,6 @@ func TestStore_DropDatabase_ErrDatabaseNotFound(t *testing.T) {
}
}
// Ensure the store can rename an existing database.
func TestStore_RenameDatabase(t *testing.T) {
t.Parallel()
s := MustOpenStore()
defer s.Close()
// Create three databases.
for i := 0; i < 3; i++ {
if _, err := s.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
t.Fatal(err)
}
}
// Rename database db1, leaving db0 and db2 unchanged.
if err := s.RenameDatabase("db1", "db3"); err != nil {
t.Fatal(err)
}
// Ensure the nodes are correct.
exp := &meta.DatabaseInfo{Name: "db0"}
if di, _ := s.Database("db0"); !reflect.DeepEqual(di, exp) {
t.Fatalf("unexpected database(0): \ngot: %#v\nexp: %#v", di, exp)
}
if di, _ := s.Database("db1"); di != nil {
t.Fatalf("unexpected database(1): %#v", di)
}
exp = &meta.DatabaseInfo{Name: "db2"}
if di, _ := s.Database("db2"); !reflect.DeepEqual(di, exp) {
t.Fatalf("unexpected database(2): \ngot: %#v\nexp: %#v", di, exp)
}
exp = &meta.DatabaseInfo{Name: "db3"}
if di, _ := s.Database("db3"); !reflect.DeepEqual(di, exp) {
t.Fatalf("unexpected database(3): \ngot: %#v\nexp: %#v", di, exp)
}
}
// Ensure the store returns an error when renaming a database that doesn't exist.
func TestStore_RenameDatabase_ErrDatabaseNotFound(t *testing.T) {
t.Parallel()
s := MustOpenStore()
defer s.Close()
if err := s.RenameDatabase("no_such_database", "another_database"); err != meta.ErrDatabaseNotFound {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure the store returns an error when renaming a database to a database that already exists.
func TestStore_RenameDatabase_ErrDatabaseExists(t *testing.T) {
t.Parallel()
s := MustOpenStore()
defer s.Close()
// create two databases
if _, err := s.CreateDatabase("db00"); err != nil {
t.Fatal(err)
}
if _, err := s.CreateDatabase("db01"); err != nil {
t.Fatal(err)
}
if err := s.RenameDatabase("db00", "db01"); err != meta.ErrDatabaseExists {
t.Fatalf("unexpected error: %s", err)
}
}
// Ensure the store can create a retention policy on a database.
func TestStore_CreateRetentionPolicy(t *testing.T) {
t.Parallel()


@ -341,7 +341,7 @@ func scanKey(buf []byte, i int) (int, []byte, error) {
}
// Now we know where the key region is within buf, and the locations of tags, we
// need to determine if duplicate tags exist and if the tags are sorted. This iterates
// 1/2 of the list comparing each end with each other, walking towards the center from
// both sides.
for j := 0; j < commas/2; j++ {
@ -531,9 +531,14 @@ func scanTime(buf []byte, i int) (int, []byte, error) {
break
}
// Timestamps should be integers, make sure they are so we don't need to actually
// parse the timestamp until needed
if buf[i] < '0' || buf[i] > '9' {
// Handle negative timestamps
if i == start && buf[i] == '-' {
i++
continue
}
return i, buf[start:i], fmt.Errorf("bad timestamp")
}
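The effect of the new branch is easiest to see through the public parser entry point that the tests below exercise; a quick sketch, assuming the `models` import path used elsewhere in this tree:
package main

import (
	"fmt"

	"github.com/influxdb/influxdb/models"
)

func main() {
	// A '-' is now accepted, but only as the first byte of the timestamp.
	if pts, err := models.ParsePointsString("cpu value=1 -1"); err == nil {
		fmt.Println(pts[0].Time().UnixNano()) // -1
	}
	// A '-' anywhere else in the timestamp still fails with "bad timestamp".
	if _, err := models.ParsePointsString("cpu value=1 1-"); err != nil {
		fmt.Println("rejected:", err)
	}
}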


@ -330,7 +330,7 @@ func TestParsePointMaxInt64(t *testing.T) {
t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err)
}
if exp, got := int64(9223372036854775807), p[0].Fields()["value"].(int64); exp != got {
t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got)
}
// leading zeros
@ -532,7 +532,7 @@ func TestParsePointUnescape(t *testing.T) {
},
time.Unix(0, 0)))
// commas in measurement name
test(t, `cpu\,main,regions=east\,west value=1.0`,
models.NewPoint(
"cpu,main", // comma in the name
@ -975,6 +975,69 @@ func TestParsePointUnicodeString(t *testing.T) {
)
}
func TestParsePointNegativeTimestamp(t *testing.T) {
test(t, `cpu value=1 -1`,
models.NewPoint(
"cpu",
models.Tags{},
models.Fields{
"value": 1.0,
},
time.Unix(0, -1)),
)
}
func TestParsePointMaxTimestamp(t *testing.T) {
test(t, `cpu value=1 9223372036854775807`,
models.NewPoint(
"cpu",
models.Tags{},
models.Fields{
"value": 1.0,
},
time.Unix(0, int64(1<<63-1))),
)
}
func TestParsePointMinTimestamp(t *testing.T) {
test(t, `cpu value=1 -9223372036854775807`,
models.NewPoint(
"cpu",
models.Tags{},
models.Fields{
"value": 1.0,
},
time.Unix(0, -int64(1<<63-1))),
)
}
func TestParsePointInvalidTimestamp(t *testing.T) {
_, err := models.ParsePointsString("cpu value=1 9223372036854775808")
if err == nil {
t.Fatalf("expected error for out-of-range timestamp, got nil")
}
_, err = models.ParsePointsString("cpu value=1 -92233720368547758078")
if err == nil {
t.Fatalf("expected error for out-of-range timestamp, got nil")
}
_, err = models.ParsePointsString("cpu value=1 -")
if err == nil {
t.Fatalf("expected error for malformed timestamp, got nil")
}
_, err = models.ParsePointsString("cpu value=1 -/")
if err == nil {
t.Fatalf("expected error for malformed timestamp, got nil")
}
_, err = models.ParsePointsString("cpu value=1 -1?")
if err == nil {
t.Fatalf("expected error for malformed timestamp, got nil")
}
_, err = models.ParsePointsString("cpu value=1 1-")
if err == nil {
t.Fatalf("expected error for malformed timestamp, got nil")
}
}
func TestNewPointFloatWithoutDecimal(t *testing.T) {
test(t, `cpu value=1 1000000000`,
models.NewPoint(
@ -1064,7 +1127,6 @@ func TestNewPointNaN(t *testing.T) {
},
time.Unix(0, 0)),
)
}
func TestNewPointLargeNumberOfTags(t *testing.T) {
@ -1105,7 +1167,6 @@ func TestParsePointIntsFloats(t *testing.T) {
if _, ok := pt.Fields()["float2"].(float64); !ok {
t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float64"], float64(12.1))
}
}
func TestParsePointKeyUnsorted(t *testing.T) {


@ -115,7 +115,7 @@ func New(c Config) *Monitor {
}
// Open opens the monitoring system, using the given clusterID, node ID, and hostname
// for identification purposes.
func (m *Monitor) Open() error {
m.Logger.Printf("Starting monitor system")
@ -171,8 +171,8 @@ func (m *Monitor) DeregisterDiagnosticsClient(name string) {
// Statistics returns the combined statistics for all expvar data. The given
// tags are added to each of the returned statistics.
func (m *Monitor) Statistics(tags map[string]string) ([]*Statistic, error) {
statistics := make([]*Statistic, 0)
expvar.Do(func(kv expvar.KeyValue) {
// Skip built-in expvar stats.
@ -180,7 +180,7 @@ func (m *Monitor) Statistics(tags map[string]string) ([]*statistic, error) {
return
}
statistic := &Statistic{
Tags: make(map[string]string),
Values: make(map[string]interface{}),
}
@ -246,7 +246,7 @@ func (m *Monitor) Statistics(tags map[string]string) ([]*statistic, error) {
})
// Add Go memstats.
statistic := &Statistic{
Name: "runtime",
Tags: make(map[string]string),
Values: make(map[string]interface{}),
@ -388,16 +388,16 @@ func (m *Monitor) storeStatistics() {
}
}
// Statistic represents the information returned by a single monitor client.
type Statistic struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Values map[string]interface{} `json:"values"`
}
// newStatistic returns a new statistic object.
func newStatistic(name string, tags map[string]string, values map[string]interface{}) *Statistic {
return &Statistic{
Name: name,
Tags: tags,
Values: values,
@ -405,7 +405,7 @@ func newStatistic(name string, tags map[string]string, values map[string]interfa
}
// valueNames returns a sorted list of the value names, if any.
func (s *Statistic) valueNames() []string {
a := make([]string, 0, len(s.Values))
for k, _ := range s.Values {
a = append(a, k)
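The struct is exported and JSON-tagged here because the new registration service ships these statistics off the node; a minimal sketch of the resulting wire shape using only encoding/json (the field values are invented):
package main

import (
	"encoding/json"
	"fmt"
)

// Statistic mirrors the newly exported monitor.Statistic above.
type Statistic struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}

func main() {
	s := Statistic{
		Name:   "runtime",
		Tags:   map[string]string{"clusterID": "1"},
		Values: map[string]interface{}{"Alloc": 123456},
	}
	b, _ := json.Marshal(s)
	// Prints: {"name":"runtime","tags":{"clusterID":"1"},"values":{"Alloc":123456}}
	fmt.Println(string(b))
}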


@ -11,7 +11,7 @@ import (
// StatementExecutor translates InfluxQL queries to Monitor methods.
type StatementExecutor struct {
Monitor interface {
Statistics(map[string]string) ([]*Statistic, error)
Diagnostics() (map[string]*Diagnostic, error)
}
}


@ -47,6 +47,8 @@ func (c *tcpConnection) Close() {
}
type Service struct {
mu sync.Mutex
bindAddress string
database string
protocol string
@ -121,6 +123,9 @@ func NewService(c Config) (*Service, error) {
// Open starts the Graphite input processing data.
func (s *Service) Open() error {
s.mu.Lock()
defer s.mu.Unlock()
s.logger.Printf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)
// Configure expvar monitoring. It's OK to do this even if the service fails to open and
@ -176,6 +181,9 @@ func (s *Service) closeAllConnections() {
// Close stops all data processing on the Graphite input.
func (s *Service) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
s.closeAllConnections()
if s.ln != nil {
@ -185,7 +193,9 @@ func (s *Service) Close() error {
s.udpConn.Close()
}
if s.batcher != nil {
s.batcher.Stop()
}
close(s.done)
s.wg.Wait()
s.done = nil


@ -0,0 +1,27 @@
package registration
import (
"time"
"github.com/influxdb/influxdb/toml"
)
const (
DefaultURL = "https://enterprise.influxdata.com"
DefaultStatsInterval = time.Minute
)
type Config struct {
Enabled bool `toml:"enabled"`
URL string `toml:"url"`
Token string `toml:"token"`
StatsInterval toml.Duration `toml:"stats-interval"`
}
func NewConfig() Config {
return Config{
Enabled: true,
URL: DefaultURL,
StatsInterval: toml.Duration(DefaultStatsInterval),
}
}


@ -0,0 +1,33 @@
package registration_test
import (
"testing"
"time"
"github.com/BurntSushi/toml"
"github.com/influxdb/influxdb/services/registration"
)
func TestConfig_Parse(t *testing.T) {
// Parse configuration.
var c registration.Config
if _, err := toml.Decode(`
enabled = true
url = "a.b.c"
token = "1234"
stats-interval = "1s"
`, &c); err != nil {
t.Fatal(err)
}
// Validate configuration.
if c.Enabled != true {
t.Fatalf("unexpected enabled state: %v", c.Enabled)
} else if c.URL != "a.b.c" {
t.Fatalf("unexpected Enterprise URL: %s", c.URL)
} else if c.Token != "1234" {
t.Fatalf("unexpected Enterprise token: %s", c.Token)
} else if time.Duration(c.StatsInterval) != time.Second {
t.Fatalf("unexpected stats interval: %v", c.StatsInterval)
}
}


@ -0,0 +1,218 @@
package registration
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"sync"
"time"
"github.com/influxdb/influxdb/monitor"
)
// Service represents the registration service.
type Service struct {
MetaStore interface {
ClusterID() (uint64, error)
NodeID() uint64
}
Monitor interface {
Statistics(tags map[string]string) ([]*monitor.Statistic, error)
RegisterDiagnosticsClient(name string, client monitor.DiagsClient)
}
enabled bool
url *url.URL
token string
statsInterval time.Duration
version string
mu sync.Mutex
lastContact time.Time
wg sync.WaitGroup
done chan struct{}
logger *log.Logger
}
// NewService returns a configured registration service.
func NewService(c Config, version string) (*Service, error) {
url, err := url.Parse(c.URL)
if err != nil {
return nil, err
}
return &Service{
enabled: c.Enabled,
url: url,
token: c.Token,
statsInterval: time.Duration(c.StatsInterval),
version: version,
done: make(chan struct{}),
logger: log.New(os.Stderr, "[registration] ", log.LstdFlags),
}, nil
}
// Open starts the registration service.
func (s *Service) Open() error {
if !s.enabled {
return nil
}
s.logger.Println("Starting registration service")
if err := s.registerServer(); err != nil {
return err
}
// Register diagnostics if a Monitor service is available.
if s.Monitor != nil {
s.Monitor.RegisterDiagnosticsClient("registration", s)
}
s.wg.Add(1)
go s.reportStats()
return nil
}
// Close stops the registration service.
func (s *Service) Close() error {
s.logger.Println("registration service terminating")
close(s.done)
s.wg.Wait()
return nil
}
func (s *Service) Diagnostics() (*monitor.Diagnostic, error) {
diagnostics := map[string]interface{}{
"URL": s.url.String(),
"token": s.token,
"last_contact": s.getLastContact().String(),
}
return monitor.DiagnosticFromMap(diagnostics), nil
}
// registerServer registers the server.
func (s *Service) registerServer() error {
if !s.enabled || s.token == "" {
return nil
}
clusterID, err := s.MetaStore.ClusterID()
if err != nil {
s.logger.Printf("failed to retrieve cluster ID for registration: %s", err.Error())
return err
}
hostname, err := os.Hostname()
if err != nil {
return err
}
j := map[string]interface{}{
"cluster_id": fmt.Sprintf("%d", clusterID),
"server_id": fmt.Sprintf("%d", s.MetaStore.NodeID()),
"host": hostname,
"product": "influxdb",
"version": s.version,
}
b, err := json.Marshal(j)
if err != nil {
return err
}
url := fmt.Sprintf("%s/api/v1/servers?token=%s", s.url.String(), s.token)
s.wg.Add(1)
go func() {
defer s.wg.Done()
client := http.Client{Timeout: 5 * time.Second}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(b))
if err != nil {
s.logger.Printf("failed to register server with %s: %s", s.url.String(), err.Error())
return
}
s.updateLastContact(time.Now().UTC())
defer resp.Body.Close()
if resp.StatusCode == http.StatusCreated {
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
s.logger.Printf("failed to read response from registration server: %s", err.Error())
return
}
s.logger.Printf("failed to register server with %s: received code %s, body: %s", s.url.String(), resp.Status, string(body))
}()
return nil
}
func (s *Service) reportStats() {
defer s.wg.Done()
if s.token == "" {
// No reporting, for now, without token.
return
}
statsURL := fmt.Sprintf("%s/api/v1/stats/influxdb?token=%s", s.url.String(), s.token)
clusterID, err := s.MetaStore.ClusterID()
if err != nil {
s.logger.Printf("failed to retrieve cluster ID for registration -- aborting stats upload: %s", err.Error())
return
}
t := time.NewTicker(s.statsInterval)
for {
select {
case <-t.C:
stats, err := s.Monitor.Statistics(nil)
if err != nil {
s.logger.Printf("failed to retrieve statistics: %s", err.Error())
continue
}
o := map[string]interface{}{
"cluster_id": fmt.Sprintf("%d", clusterID),
"server_id": fmt.Sprintf("%d", s.MetaStore.NodeID()),
"stats": stats,
}
b, err := json.Marshal(o)
if err != nil {
s.logger.Printf("failed to JSON-encode stats: %s", err.Error())
continue
}
client := http.Client{Timeout: time.Duration(5 * time.Second)}
resp, err := client.Post(statsURL, "application/json", bytes.NewBuffer(b))
if err != nil {
s.logger.Printf("failed to post statistics to %s: %s", statsURL, err.Error())
continue
}
s.updateLastContact(time.Now().UTC())
if resp.StatusCode != http.StatusOK {
s.logger.Printf("failed to post statistics to %s: response code: %d", statsURL, resp.StatusCode)
}
// Close the body each tick; a deferred Close here would not run until
// reportStats returns, leaking response bodies on every interval.
resp.Body.Close()
case <-s.done:
return
}
}
}
func (s *Service) updateLastContact(t time.Time) {
s.mu.Lock()
defer s.mu.Unlock()
s.lastContact = t
}
func (s *Service) getLastContact() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.lastContact
}

View File

@ -29,7 +29,7 @@ type Service struct {
logger *log.Logger
}
-// NewService returns a configure retention policy enforcement service.
+// NewService returns a configured retention policy enforcement service.
func NewService(c Config) *Service {
return &Service{
checkInterval: time.Duration(c.CheckInterval),

View File

@ -89,7 +89,7 @@ type Config struct {
SSL bool `toml:"ssl"`
}
-// NewSeries, takes a measurement, and point count,
+// NewSeries takes a measurement, and point count,
// and a series count and returns a series
func NewSeries(m string, p int, sc int) series {
s := series{

View File

@ -8,6 +8,8 @@ import (
"github.com/influxdb/influxdb/client" "github.com/influxdb/influxdb/client"
) )
// QueryResults holds the total number of executed queries
// and the response time for each query
type QueryResults struct { type QueryResults struct {
TotalQueries int TotalQueries int
ResponseTimes ResponseTimes ResponseTimes ResponseTimes

View File

@ -80,13 +80,14 @@ type ResponseTime struct {
Time time.Time
}
-// newResponseTime returns a new response time
+// NewResponseTime returns a new response time
// with value `v` and time `time.Now()`.
func NewResponseTime(v int) ResponseTime {
r := ResponseTime{Value: v, Time: time.Now()}
return r
}
+// ResponseTimes is a slice of response times
type ResponseTimes []ResponseTime
// Implements the `Len` method for the
@ -107,6 +108,7 @@ func (rs ResponseTimes) Swap(i, j int) {
rs[i], rs[j] = rs[j], rs[i]
}
+// Measurements holds all measurement results of the stress test
type Measurements []string
// String returns a string and implements the `String` method for
@ -126,7 +128,7 @@ func (ms *Measurements) Set(value string) error {
return nil
}
-// newClient returns a pointer to an InfluxDB client for
+// NewClient returns a pointer to an InfluxDB client for
// a `Config`'s `Address` field. If an error is encountered
// when creating a new client, the function panics.
func (cfg *Config) NewClient() (*client.Client, error) {

View File

@ -1649,6 +1649,11 @@ func (e *Engine) readSeries() (map[string]*tsdb.Series, error) {
// has future encoded blocks so that this method can know how much of its values can be
// combined and output in the resulting encoded block.
func (e *Engine) DecodeAndCombine(newValues Values, block, buf []byte, nextTime int64, hasFutureBlock bool) (Values, []byte, error) {
+// No new values passed in, so nothing to combine. Just return the existing block.
+if len(newValues) == 0 {
+return newValues, block, nil
+}
values, err := DecodeBlock(block)
if err != nil {
panic(fmt.Sprintf("failure decoding block: %v", err))

View File

@ -1013,7 +1013,81 @@ func TestEngine_WriteIntoCompactedFile(t *testing.T) {
}
if count := e.DataFileCount(); count != 1 {
-t.Fatalf("execpted 1 data file but got %d", count)
+t.Fatalf("expected 1 data file but got %d", count)
}
tx, _ := e.Begin(false)
defer tx.Rollback()
c := tx.Cursor("cpu,host=A", fields, nil, true)
k, _ := c.SeekTo(0)
if k != 1000000000 {
t.Fatalf("wrong time: %d", k)
}
k, _ = c.Next()
if k != 2000000000 {
t.Fatalf("wrong time: %d", k)
}
k, _ = c.Next()
if k != 2500000000 {
t.Fatalf("wrong time: %d", k)
}
k, _ = c.Next()
if k != 3000000000 {
t.Fatalf("wrong time: %d", k)
}
k, _ = c.Next()
if k != 4000000000 {
t.Fatalf("wrong time: %d", k)
}
}
func TestEngine_WriteIntoCompactedFile_MaxPointsPerBlockZero(t *testing.T) {
e := OpenDefaultEngine()
defer e.Close()
fields := []string{"value"}
e.MaxPointsPerBlock = 4
e.RotateFileSize = 10
p1 := parsePoint("cpu,host=A value=1.1 1000000000")
p2 := parsePoint("cpu,host=A value=1.2 2000000000")
p3 := parsePoint("cpu,host=A value=1.3 3000000000")
p4 := parsePoint("cpu,host=A value=1.5 4000000000")
p5 := parsePoint("cpu,host=A value=1.6 2500000000")
p6 := parsePoint("cpu,host=A value=1.7 5000000000")
p7 := parsePoint("cpu,host=A value=1.8 6000000000")
p8 := parsePoint("cpu,host=A value=1.9 7000000000")
if err := e.WritePoints([]models.Point{p1, p2}, nil, nil); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WritePoints([]models.Point{p3}, nil, nil); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.Compact(true); err != nil {
t.Fatalf("error compacting: %s", err.Error())
}
if err := e.WritePoints([]models.Point{p4}, nil, nil); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WritePoints([]models.Point{p6, p7, p8}, nil, nil); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.Compact(true); err != nil {
t.Fatalf("error compacting: %s", err.Error())
}
if err := e.WritePoints([]models.Point{p5}, nil, nil); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if count := e.DataFileCount(); count != 1 {
t.Fatalf("expected 1 data file but got %d", count)
}
tx, _ := e.Begin(false)
@ -1353,6 +1427,32 @@ func TestEngine_RewriteFileAndCompact(t *testing.T) {
}()
}
func TestEngine_DecodeAndCombine_NoNewValues(t *testing.T) {
var newValues tsm1.Values
e := OpenDefaultEngine()
defer e.Engine.Close()
values := make(tsm1.Values, 1)
values[0] = tsm1.NewValue(time.Unix(0, 0), float64(1))
block, err := values.Encode(nil)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
remaining, encoded, err := e.DecodeAndCombine(newValues, block, nil, time.Unix(1, 0).UnixNano(), false)
if len(remaining) != 0 {
t.Fatalf("unexpected remaining values: exp %v, got %v", 0, len(remaining))
}
if len(encoded) != len(block) {
t.Fatalf("unexpected encoded block length: exp %v, got %v", len(block), len(encoded))
}
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
// Engine represents a test wrapper for tsm1.Engine.
type Engine struct {
*tsm1.Engine

View File

@ -29,8 +29,11 @@ func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point
for _, v := range row.Values {
vals := make(map[string]interface{})
for fieldName, fieldIndex := range fieldIndexes {
+val := v[fieldIndex]
+if val != nil {
vals[fieldName] = v[fieldIndex]
}
+}
p := models.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
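// The added nil check matters because a SELECT INTO row can carry nil
// entries for fields that are absent at a given timestamp; writing them
// through would produce invalid points. The same filtering as a
// standalone sketch (hypothetical helper, mirrors the loop above):
func filterNilFields(in map[string]interface{}) map[string]interface{} {
out := make(map[string]interface{}, len(in))
for k, v := range in {
if v != nil { // skip absent fields rather than writing nil
out[k] = v
}
}
return out
}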

View File

@ -739,6 +739,9 @@ func (q *QueryExecutor) writeInto(row *models.Row, selectstmt *influxql.SelectSt
// limitedRowWriter and ExecuteAggregate/Raw make it ridiculously hard to make sure that the
// results will be the same as when queried normally.
measurement := intoMeasurement(selectstmt)
+if measurement == "" {
+measurement = row.Name
+}
intodb, err := intoDB(selectstmt)
if err != nil {
return err
@ -748,14 +751,6 @@ func (q *QueryExecutor) writeInto(row *models.Row, selectstmt *influxql.SelectSt
if err != nil {
return err
}
-for _, p := range points {
-fields := p.Fields()
-for _, v := range fields {
-if v == nil {
-return nil
-}
-}
-}
req := &IntoWriteRequest{
Database: intodb,
RetentionPolicy: rp,

View File

@ -164,6 +164,16 @@ func (m *ShowMeasurementsMapper) Open() error {
// Start a goroutine to send the names over the channel as needed.
go func() {
for _, mm := range measurements {
+// Filter measurements by WITH clause, if one was given.
+if m.stmt.Source != nil {
+s, ok := m.stmt.Source.(*influxql.Measurement)
+if !ok ||
+s.Regex != nil && !s.Regex.Val.MatchString(mm.Name) ||
+s.Name != "" && s.Name != mm.Name {
+continue
+}
+}
ch <- mm.Name
}
close(ch)
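// The filter above accepts a measurement when the WITH source is either a
// matching regex or an exact name match. The same predicate extracted for
// clarity (hypothetical helper using only the standard library):
func matchesWithClause(re *regexp.Regexp, exact, name string) bool {
if re != nil && !re.MatchString(name) {
return false // regex given, but the name does not match it
}
if exact != "" && exact != name {
return false // exact name given, but it differs
}
return true
}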

View File

@ -0,0 +1,65 @@
// The suite package contains logic for creating testing suite structs
// and running the methods on those structs as tests. The most useful
// piece of this package is that you can create setup/teardown methods
// on your testing suites, which will run before/after the whole suite
// or individual tests (depending on which interface(s) you
// implement).
//
// A testing suite is usually built by first extending the built-in
// suite functionality from suite.Suite in testify. Alternatively,
// you could reproduce that logic on your own if you wanted (you
// just need to implement the TestingSuite interface from
// suite/interfaces.go).
//
// After that, you can implement any of the interfaces in
// suite/interfaces.go to add setup/teardown functionality to your
// suite, and add any methods that start with "Test" to add tests.
// Methods that do not match any suite interfaces and do not begin
// with "Test" will not be run by testify, and can safely be used as
// helper methods.
//
// Once you've built your testing suite, you need to run the suite
// (using suite.Run from testify) inside any function that matches the
// identity that "go test" is already looking for (i.e.
// func(*testing.T)).
//
// The regular expression in the "-run" command-line argument selects
// which test suites to run, and the regular expression in the "-m"
// command-line argument selects which methods of those suites to run.
// The Suite object has assertion methods.
//
// A crude example:
// // Basic imports
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// "github.com/stretchr/testify/suite"
// )
//
// // Define the suite, and absorb the built-in basic suite
// // functionality from testify - including a T() method which
// // returns the current testing context
// type ExampleTestSuite struct {
// suite.Suite
// VariableThatShouldStartAtFive int
// }
//
// // Make sure that VariableThatShouldStartAtFive is set to five
// // before each test
// func (suite *ExampleTestSuite) SetupTest() {
// suite.VariableThatShouldStartAtFive = 5
// }
//
// // All methods that begin with "Test" are run as tests within a
// // suite.
// func (suite *ExampleTestSuite) TestExample() {
// assert.Equal(suite.T(), suite.VariableThatShouldStartAtFive, 5)
// suite.Equal(suite.VariableThatShouldStartAtFive, 5)
// }
//
// // In order for 'go test' to run this suite, we need to create
// // a normal test function and pass our suite to suite.Run
// func TestExampleTestSuite(t *testing.T) {
// suite.Run(t, new(ExampleTestSuite))
// }
package suite

View File

@ -0,0 +1,34 @@
package suite
import "testing"
// TestingSuite can store and return the current *testing.T context
// generated by 'go test'.
type TestingSuite interface {
T() *testing.T
SetT(*testing.T)
}
// SetupAllSuite has a SetupSuite method, which will run before the
// tests in the suite are run.
type SetupAllSuite interface {
SetupSuite()
}
// SetupTestSuite has a SetupTest method, which will run before each
// test in the suite.
type SetupTestSuite interface {
SetupTest()
}
// TearDownAllSuite has a TearDownSuite method, which will run after
// all the tests in the suite have been run.
type TearDownAllSuite interface {
TearDownSuite()
}
// TearDownTestSuite has a TearDownTest method, which will run after
// each test in the suite.
type TearDownTestSuite interface {
TearDownTest()
}
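// A minimal suite exercising these hooks might look like the following
// sketch (the map stands in for a real resource such as a DB handle):
//
//	type DBTestSuite struct {
//		suite.Suite
//		db map[string]string
//	}
//
//	// SetupTest satisfies SetupTestSuite: runs before every Test method.
//	func (s *DBTestSuite) SetupTest() {
//		s.db = map[string]string{"k": "v"}
//	}
//
//	// TearDownTest satisfies TearDownTestSuite: runs after every Test method.
//	func (s *DBTestSuite) TearDownTest() {
//		s.db = nil
//	}
//
//	func (s *DBTestSuite) TestLookup() {
//		s.Equal("v", s.db["k"])
//	}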

View File

@ -0,0 +1,114 @@
package suite
import (
"flag"
"fmt"
"os"
"reflect"
"regexp"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var matchMethod = flag.String("m", "", "regular expression to select tests of the suite to run")
// Suite is a basic testing suite with methods for storing and
// retrieving the current *testing.T context.
type Suite struct {
*assert.Assertions
require *require.Assertions
t *testing.T
}
// T retrieves the current *testing.T context.
func (suite *Suite) T() *testing.T {
return suite.t
}
// SetT sets the current *testing.T context.
func (suite *Suite) SetT(t *testing.T) {
suite.t = t
suite.Assertions = assert.New(t)
}
// Require returns a require context for suite.
func (suite *Suite) Require() *require.Assertions {
if suite.require == nil {
suite.require = require.New(suite.T())
}
return suite.require
}
// Assert returns an assert context for suite. Normally, you can call
// `suite.NoError(expected, actual)`, but for situations where the embedded
// methods are overridden (for example, you might want to override
// assert.Assertions with require.Assertions), this method is provided so you
// can call `suite.Assert().NoError()`.
func (suite *Suite) Assert() *assert.Assertions {
if suite.Assertions == nil {
suite.Assertions = assert.New(suite.T())
}
return suite.Assertions
}
// Run takes a testing suite and runs all of the tests attached
// to it.
func Run(t *testing.T, suite TestingSuite) {
suite.SetT(t)
if setupAllSuite, ok := suite.(SetupAllSuite); ok {
setupAllSuite.SetupSuite()
}
defer func() {
if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
tearDownAllSuite.TearDownSuite()
}
}()
methodFinder := reflect.TypeOf(suite)
tests := []testing.InternalTest{}
for index := 0; index < methodFinder.NumMethod(); index++ {
method := methodFinder.Method(index)
ok, err := methodFilter(method.Name)
if err != nil {
fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
os.Exit(1)
}
if ok {
test := testing.InternalTest{
Name: method.Name,
F: func(t *testing.T) {
parentT := suite.T()
suite.SetT(t)
if setupTestSuite, ok := suite.(SetupTestSuite); ok {
setupTestSuite.SetupTest()
}
defer func() {
if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
tearDownTestSuite.TearDownTest()
}
suite.SetT(parentT)
}()
method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
},
}
tests = append(tests, test)
}
}
if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil },
tests) {
t.Fail()
}
}
// methodFilter reports whether a method should be run as a test, according
// to the regular expression set via the -m command-line argument.
func methodFilter(name string) (bool, error) {
if ok, _ := regexp.MatchString("^Test", name); !ok {
return false, nil
}
return regexp.MatchString(*matchMethod, name)
}
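// Note that leaving -m at its default empty string selects every method,
// since an empty regular expression matches any input; only the ^Test
// prefix check is mandatory. For example:
//
//	methodFilter("TestAgent_JitterMax")  // true with -m "" or -m "Jitter"
//	methodFilter("TestAgent_LoadPlugin") // true with -m "", false with -m "Jitter"
//	methodFilter("helperMethod")         // always false: no Test prefix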

View File

@ -37,8 +37,11 @@ ifeq ($(UNAME), Linux)
ADVERTISED_HOST=localhost docker-compose --file scripts/docker-compose.yml up -d
endif
-test: prepare docker-compose
-$(GOBIN)/godep go test ./...
+test: test-cleanup prepare docker-compose
+# Sleeping for kafka leadership election, TSDB setup, etc.
+sleep 30
+# Setup SUCCESS, running tests
+godep go test ./...
test-short: prepare
$(GOBIN)/godep go test -short ./...

View File

@ -67,23 +67,23 @@ brew install telegraf
### From Source:
Telegraf manages dependencies via `godep`, which gets installed via the Makefile
-if you don't have it already. You also must build with golang version 1.4+
+if you don't have it already. You also must build with golang version 1.4+.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
-3. run `go get github.com/influxdb/telegraf`
-4. `cd $GOPATH/src/github.com/influxdb/telegraf`
-5. run `make`
+3. Run `go get github.com/influxdb/telegraf`
+4. Run `cd $GOPATH/src/github.com/influxdb/telegraf`
+5. Run `make`
### How to use it:
-* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration
-* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`
-to create a config file with only CPU and memory plugins defined, and InfluxDB output defined
-* Edit the configuration to match your needs
-* Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT
+* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration.
+* Or run `telegraf -sample-config -filter cpu:mem -outputfilter influxdb > telegraf.conf`.
+to create a config file with only CPU and memory plugins defined, and InfluxDB output defined.
+* Edit the configuration to match your needs.
+* Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT.
* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs.
-* Run `telegraf -config telegraf.conf -filter system:swap`
+* Run `telegraf -config telegraf.conf -filter system:swap`.
to run telegraf with only the system & swap plugins defined in the config.
## Telegraf Options
@ -134,6 +134,7 @@ measurements at a 10s interval and will collect totalcpu & percpu data.
[outputs.influxdb]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
+precision = "s"
# PLUGINS
[cpu]
@ -160,11 +161,13 @@ Below is how to configure `tagpass` and `tagdrop` parameters (added in 0.1.5)
## Supported Plugins
**You can view usage instructions for each plugin by running**
-`telegraf -usage <pluginname>`
+`telegraf -usage <pluginname>`.
-Telegraf currently has support for collecting metrics from
+Telegraf currently has support for collecting metrics from:
+* aerospike
* apache
+* bcache
* disque
* elasticsearch
* exec (generic JSON-emitting executable plugin)
@ -196,9 +199,9 @@ Telegraf currently has support for collecting metrics from
* disk
* swap
-## Service Plugins
+## Supported Service Plugins
-Telegraf can collect metrics via the following services
+Telegraf can collect metrics via the following services:
* statsd
@ -209,7 +212,7 @@ want to add support for another service or third-party API.
Telegraf also supports specifying multiple output sinks to send data to,
configuring each output sink is different, but examples can be
-found by running `telegraf -sample-config`
+found by running `telegraf -sample-config`.
## Supported Outputs
@ -219,9 +222,10 @@ found by running `telegraf -sample-config`
* opentsdb
* amqp (rabbitmq)
* mqtt
+* librato
## Contributing
Please see the
[contributing guide](CONTRIBUTING.md)
-for details on contributing a plugin or output to Telegraf
+for details on contributing a plugin or output to Telegraf.

View File

@ -2,178 +2,138 @@ package telegraf
import ( import (
"fmt" "fmt"
"sort"
"strings"
"sync" "sync"
"time" "time"
"github.com/influxdb/influxdb/client" "github.com/influxdb/influxdb/client/v2"
) )
// BatchPoints is used to send a batch of data in a single write from telegraf type Accumulator interface {
// to influx Add(measurement string, value interface{},
type BatchPoints struct { tags map[string]string, t ...time.Time)
AddFields(measurement string, fields map[string]interface{},
tags map[string]string, t ...time.Time)
SetDefaultTags(tags map[string]string)
AddDefaultTag(key, value string)
Prefix() string
SetPrefix(prefix string)
Debug() bool
SetDebug(enabled bool)
}
func NewAccumulator(
plugin *ConfiguredPlugin,
points chan *client.Point,
) Accumulator {
acc := accumulator{}
acc.points = points
acc.plugin = plugin
return &acc
}
type accumulator struct {
sync.Mutex sync.Mutex
client.BatchPoints points chan *client.Point
Debug bool defaultTags map[string]string
Prefix string debug bool
Config *ConfiguredPlugin plugin *ConfiguredPlugin
prefix string
} }
// deepcopy returns a deep copy of the BatchPoints object. This is primarily so func (ac *accumulator) Add(
// we can do multithreaded output flushing (see Agent.flush)
func (bp *BatchPoints) deepcopy() *BatchPoints {
bp.Lock()
defer bp.Unlock()
var bpc BatchPoints
bpc.Time = bp.Time
bpc.Precision = bp.Precision
bpc.Tags = make(map[string]string)
for k, v := range bp.Tags {
bpc.Tags[k] = v
}
var pts []client.Point
for _, pt := range bp.Points {
var ptc client.Point
ptc.Measurement = pt.Measurement
ptc.Time = pt.Time
ptc.Precision = pt.Precision
ptc.Raw = pt.Raw
ptc.Tags = make(map[string]string)
ptc.Fields = make(map[string]interface{})
for k, v := range pt.Tags {
ptc.Tags[k] = v
}
for k, v := range pt.Fields {
ptc.Fields[k] = v
}
pts = append(pts, ptc)
}
bpc.Points = pts
return &bpc
}
// Add adds a measurement
func (bp *BatchPoints) Add(
measurement string, measurement string,
val interface{}, value interface{},
tags map[string]string, tags map[string]string,
t ...time.Time,
) { ) {
fields := make(map[string]interface{}) fields := make(map[string]interface{})
fields["value"] = val fields["value"] = value
bp.AddFields(measurement, fields, tags) ac.AddFields(measurement, fields, tags, t...)
} }
// AddFieldsWithTime adds a measurement with a provided timestamp func (ac *accumulator) AddFields(
func (bp *BatchPoints) AddFieldsWithTime(
measurement string, measurement string,
fields map[string]interface{}, fields map[string]interface{},
tags map[string]string, tags map[string]string,
timestamp time.Time, t ...time.Time,
) { ) {
// TODO this function should add the fields with the timestamp, but that will
// need to wait for the InfluxDB point precision/unit to be fixed
bp.AddFields(measurement, fields, tags)
// bp.Lock()
// defer bp.Unlock()
// measurement = bp.Prefix + measurement if tags == nil {
tags = make(map[string]string)
}
// if bp.Config != nil { // InfluxDB client/points does not support writing uint64
// if !bp.Config.ShouldPass(measurement, tags) { // TODO fix when it does
// return // https://github.com/influxdb/influxdb/pull/4508
// } for k, v := range fields {
// } switch val := v.(type) {
case uint64:
if val < uint64(9223372036854775808) {
fields[k] = int64(val)
} else {
fields[k] = int64(9223372036854775807)
}
}
}
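// The uint64 branch above, restated outside the diff: the InfluxDB v2
// client cannot encode uint64 fields, so values are written as int64 and
// saturated at the int64 maximum (sketch; the helper name is hypothetical):
//
//	func clampUint64(v uint64) int64 {
//		if v <= math.MaxInt64 {
//			return int64(v)
//		}
//		return math.MaxInt64 // 9223372036854775807
//	}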
// if bp.Debug { var timestamp time.Time
// var tg []string if len(t) > 0 {
timestamp = t[0]
} else {
timestamp = time.Now()
}
// for k, v := range tags { if ac.plugin != nil {
// tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v)) if !ac.plugin.ShouldPass(measurement, tags) {
// }
// var vals []string
// for k, v := range fields {
// vals = append(vals, fmt.Sprintf("%s=%v", k, v))
// }
// sort.Strings(tg)
// sort.Strings(vals)
// fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " "))
// }
// bp.Points = append(bp.Points, client.Point{
// Measurement: measurement,
// Tags: tags,
// Fields: fields,
// Time: timestamp,
// })
}
// AddFields will eventually replace the Add function, once we move to having a
// single plugin as a single measurement with multiple fields
func (bp *BatchPoints) AddFields(
measurement string,
fields map[string]interface{},
tags map[string]string,
) {
bp.Lock()
defer bp.Unlock()
measurement = bp.Prefix + measurement
if bp.Config != nil {
if !bp.Config.ShouldPass(measurement, tags) {
return return
} }
} }
// Apply BatchPoints tags to tags passed in, giving precedence to those for k, v := range ac.defaultTags {
// passed in. This is so that plugins have the ability to override global if _, ok := tags[k]; !ok {
// tags.
for k, v := range bp.Tags {
_, ok := tags[k]
if !ok {
tags[k] = v tags[k] = v
} }
} }
if bp.Debug { if ac.prefix != "" {
var tg []string measurement = ac.prefix + measurement
for k, v := range tags {
tg = append(tg, fmt.Sprintf("%s=\"%s\"", k, v))
} }
var vals []string pt := client.NewPoint(measurement, tags, fields, timestamp)
if ac.debug {
for k, v := range fields { fmt.Println("> " + pt.String())
vals = append(vals, fmt.Sprintf("%s=%v", k, v))
} }
ac.points <- pt
sort.Strings(tg) }
sort.Strings(vals)
func (ac *accumulator) SetDefaultTags(tags map[string]string) {
fmt.Printf("> [%s] %s %s\n", strings.Join(tg, " "), measurement, strings.Join(vals, " ")) ac.defaultTags = tags
} }
bp.Points = append(bp.Points, client.Point{ func (ac *accumulator) AddDefaultTag(key, value string) {
Measurement: measurement, ac.defaultTags[key] = value
Tags: tags, }
Fields: fields,
}) func (ac *accumulator) Prefix() string {
return ac.prefix
}
func (ac *accumulator) SetPrefix(prefix string) {
ac.prefix = prefix
}
func (ac *accumulator) Debug() bool {
return ac.debug
}
func (ac *accumulator) SetDebug(debug bool) {
ac.debug = debug
} }
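// From a plugin's point of view the interface reduces to calls like these
// inside Gather (plugin type, measurement names, and tags are illustrative):
//
//	func (m *MyPlugin) Gather(acc Accumulator) error {
//		tags := map[string]string{"host": "server01"}
//		// Single-value form; wraps the value as {"value": ...}.
//		acc.Add("uptime", int64(1234), tags)
//		// Multi-field form with an explicit timestamp.
//		acc.AddFields("mem", map[string]interface{}{
//			"total": int64(4096),
//			"free":  int64(1024),
//		}, tags, time.Now())
//		return nil
//	}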

344
agent.go
View File

@ -1,16 +1,20 @@
package telegraf package telegraf
import ( import (
"errors" "crypto/rand"
"fmt" "fmt"
"log" "log"
"math/big"
"os" "os"
"sort" "sort"
"sync" "sync"
"time" "time"
"github.com/influxdb/telegraf/duration"
"github.com/influxdb/telegraf/outputs" "github.com/influxdb/telegraf/outputs"
"github.com/influxdb/telegraf/plugins" "github.com/influxdb/telegraf/plugins"
"github.com/influxdb/influxdb/client/v2"
) )
type runningOutput struct { type runningOutput struct {
@ -28,7 +32,24 @@ type runningPlugin struct {
type Agent struct { type Agent struct {
// Interval at which to gather information // Interval at which to gather information
Interval Duration Interval duration.Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// Interval at which to flush data
FlushInterval duration.Duration
// FlushRetries is the number of times to retry each data flush
FlushRetries int
// FlushJitter tells the agent how much random jitter to add to
// the flush interval (see jitterInterval below).
FlushJitter duration.Duration
// TODO(cam): Remove UTC and Precision parameters, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
// Option for outputting data in UTC // Option for outputting data in UTC
UTC bool `toml:"utc"` UTC bool `toml:"utc"`
@ -41,7 +62,7 @@ type Agent struct {
Debug bool Debug bool
Hostname string Hostname string
Config *Config Tags map[string]string
outputs []*runningOutput outputs []*runningOutput
plugins []*runningPlugin plugins []*runningPlugin
@ -50,10 +71,12 @@ type Agent struct {
// NewAgent returns an Agent struct based off the given Config // NewAgent returns an Agent struct based off the given Config
func NewAgent(config *Config) (*Agent, error) { func NewAgent(config *Config) (*Agent, error) {
agent := &Agent{ agent := &Agent{
Config: config, Tags: make(map[string]string),
Interval: Duration{10 * time.Second}, Interval: duration.Duration{10 * time.Second},
UTC: true, RoundInterval: true,
Precision: "s", FlushInterval: duration.Duration{10 * time.Second},
FlushRetries: 2,
FlushJitter: duration.Duration{5 * time.Second},
} }
// Apply the toml table to the agent config, overriding defaults // Apply the toml table to the agent config, overriding defaults
@ -71,11 +94,7 @@ func NewAgent(config *Config) (*Agent, error) {
agent.Hostname = hostname agent.Hostname = hostname
} }
if config.Tags == nil { agent.Tags["host"] = agent.Hostname
config.Tags = map[string]string{}
}
config.Tags["host"] = agent.Hostname
return agent, nil return agent, nil
} }
@ -112,10 +131,10 @@ func (a *Agent) Close() error {
} }
// LoadOutputs loads the agent's outputs // LoadOutputs loads the agent's outputs
func (a *Agent) LoadOutputs(filters []string) ([]string, error) { func (a *Agent) LoadOutputs(filters []string, config *Config) ([]string, error) {
var names []string var names []string
for _, name := range a.Config.OutputsDeclared() { for _, name := range config.OutputsDeclared() {
creator, ok := outputs.Outputs[name] creator, ok := outputs.Outputs[name]
if !ok { if !ok {
return nil, fmt.Errorf("Undefined but requested output: %s", name) return nil, fmt.Errorf("Undefined but requested output: %s", name)
@ -127,7 +146,7 @@ func (a *Agent) LoadOutputs(filters []string) ([]string, error) {
} }
output := creator() output := creator()
err := a.Config.ApplyOutput(name, output) err := config.ApplyOutput(name, output)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -143,10 +162,10 @@ func (a *Agent) LoadOutputs(filters []string) ([]string, error) {
} }
// LoadPlugins loads the agent's plugins // LoadPlugins loads the agent's plugins
func (a *Agent) LoadPlugins(filters []string) ([]string, error) { func (a *Agent) LoadPlugins(filters []string, config *Config) ([]string, error) {
var names []string var names []string
for _, name := range a.Config.PluginsDeclared() { for _, name := range config.PluginsDeclared() {
creator, ok := plugins.Plugins[name] creator, ok := plugins.Plugins[name]
if !ok { if !ok {
return nil, fmt.Errorf("Undefined but requested plugin: %s", name) return nil, fmt.Errorf("Undefined but requested plugin: %s", name)
@ -155,7 +174,7 @@ func (a *Agent) LoadPlugins(filters []string) ([]string, error) {
if sliceContains(name, filters) || len(filters) == 0 { if sliceContains(name, filters) || len(filters) == 0 {
plugin := creator() plugin := creator()
config, err := a.Config.ApplyPlugin(name, plugin) config, err := config.ApplyPlugin(name, plugin)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -170,11 +189,9 @@ func (a *Agent) LoadPlugins(filters []string) ([]string, error) {
return names, nil return names, nil
} }
// crankParallel runs the plugins that are using the same reporting interval // gatherParallel runs the plugins that are using the same reporting interval
// as the telegraf agent. // as the telegraf agent.
func (a *Agent) crankParallel() error { func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
points := make(chan *BatchPoints, len(a.plugins))
var wg sync.WaitGroup var wg sync.WaitGroup
start := time.Now() start := time.Now()
@ -189,100 +206,51 @@ func (a *Agent) crankParallel() error {
go func(plugin *runningPlugin) { go func(plugin *runningPlugin) {
defer wg.Done() defer wg.Done()
var bp BatchPoints acc := NewAccumulator(plugin.config, pointChan)
bp.Debug = a.Debug acc.SetDebug(a.Debug)
bp.Prefix = plugin.name + "_" acc.SetPrefix(plugin.name + "_")
bp.Config = plugin.config acc.SetDefaultTags(a.Tags)
bp.Precision = a.Precision
bp.Tags = a.Config.Tags
if err := plugin.plugin.Gather(&bp); err != nil { if err := plugin.plugin.Gather(acc); err != nil {
log.Printf("Error in plugin [%s]: %s", plugin.name, err) log.Printf("Error in plugin [%s]: %s", plugin.name, err)
} }
points <- &bp
}(plugin) }(plugin)
} }
wg.Wait() wg.Wait()
close(points)
var bp BatchPoints
bp.Time = time.Now()
if a.UTC {
bp.Time = bp.Time.UTC()
}
bp.Precision = a.Precision
for sub := range points {
bp.Points = append(bp.Points, sub.Points...)
}
elapsed := time.Since(start) elapsed := time.Since(start)
log.Printf("Cranking default (%s) interval, gathered %d metrics from %d plugins in %s\n", log.Printf("Gathered metrics, (%s interval), from %d plugins in %s\n",
a.Interval, len(bp.Points), counter, elapsed) a.Interval, counter, elapsed)
return a.flush(&bp) return nil
} }
// crank is mostly for test purposes. // gatherSeparate runs the plugins that have been configured with their own
func (a *Agent) crank() error {
var bp BatchPoints
bp.Debug = a.Debug
bp.Precision = a.Precision
for _, plugin := range a.plugins {
bp.Prefix = plugin.name + "_"
bp.Config = plugin.config
err := plugin.plugin.Gather(&bp)
if err != nil {
return err
}
}
bp.Tags = a.Config.Tags
bp.Time = time.Now()
if a.UTC {
bp.Time = bp.Time.UTC()
}
return a.flush(&bp)
}
// crankSeparate runs the plugins that have been configured with their own
// reporting interval. // reporting interval.
func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error { func (a *Agent) gatherSeparate(
shutdown chan struct{},
plugin *runningPlugin,
pointChan chan *client.Point,
) error {
ticker := time.NewTicker(plugin.config.Interval) ticker := time.NewTicker(plugin.config.Interval)
for { for {
var bp BatchPoints
var outerr error var outerr error
start := time.Now() start := time.Now()
bp.Debug = a.Debug acc := NewAccumulator(plugin.config, pointChan)
acc.SetDebug(a.Debug)
acc.SetPrefix(plugin.name + "_")
acc.SetDefaultTags(a.Tags)
bp.Prefix = plugin.name + "_" if err := plugin.plugin.Gather(acc); err != nil {
bp.Config = plugin.config
bp.Precision = a.Precision
bp.Tags = a.Config.Tags
if err := plugin.plugin.Gather(&bp); err != nil {
log.Printf("Error in plugin [%s]: %s", plugin.name, err) log.Printf("Error in plugin [%s]: %s", plugin.name, err)
outerr = errors.New("Error encountered processing plugins & outputs")
}
bp.Time = time.Now()
if a.UTC {
bp.Time = bp.Time.UTC()
} }
elapsed := time.Since(start) elapsed := time.Since(start)
log.Printf("Cranking separate (%s) interval, gathered %d metrics from %s in %s\n", log.Printf("Gathered metrics, (separate %s interval), from %s in %s\n",
plugin.config.Interval, len(bp.Points), plugin.name, elapsed) plugin.config.Interval, plugin.name, elapsed)
if err := a.flush(&bp); err != nil {
outerr = errors.New("Error encountered processing plugins & outputs")
}
if outerr != nil { if outerr != nil {
return outerr return outerr
@ -297,47 +265,36 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err
} }
} }
func (a *Agent) flush(bp *BatchPoints) error {
var wg sync.WaitGroup
var outerr error
for _, o := range a.outputs {
wg.Add(1)
// Copy BatchPoints
bpc := bp.deepcopy()
go func(ro *runningOutput) {
defer wg.Done()
// Log all output errors:
if err := ro.output.Write(bpc.BatchPoints); err != nil {
log.Printf("Error in output [%s]: %s", ro.name, err)
outerr = errors.New("Error encountered flushing outputs")
}
}(o)
}
wg.Wait()
return outerr
}
// Test verifies that we can 'Gather' from all plugins with their configured // Test verifies that we can 'Gather' from all plugins with their configured
// Config struct // Config struct
func (a *Agent) Test() error { func (a *Agent) Test() error {
var acc BatchPoints shutdown := make(chan struct{})
defer close(shutdown)
pointChan := make(chan *client.Point)
acc.Debug = true // dummy receiver for the point channel
go func() {
for {
select {
case <-pointChan:
// do nothing
case <-shutdown:
return
}
}
}()
for _, plugin := range a.plugins { for _, plugin := range a.plugins {
acc.Prefix = plugin.name + "_" acc := NewAccumulator(plugin.config, pointChan)
acc.Config = plugin.config acc.SetDebug(true)
acc.SetPrefix(plugin.name + "_")
fmt.Printf("* Plugin: %s, Collection 1\n", plugin.name) fmt.Printf("* Plugin: %s, Collection 1\n", plugin.name)
if plugin.config.Interval != 0 { if plugin.config.Interval != 0 {
fmt.Printf("* Internal: %s\n", plugin.config.Interval) fmt.Printf("* Internal: %s\n", plugin.config.Interval)
} }
if err := plugin.plugin.Gather(&acc); err != nil { if err := plugin.plugin.Gather(acc); err != nil {
return err return err
} }
@ -347,7 +304,7 @@ func (a *Agent) Test() error {
case "cpu": case "cpu":
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", plugin.name) fmt.Printf("* Plugin: %s, Collection 2\n", plugin.name)
if err := plugin.plugin.Gather(&acc); err != nil { if err := plugin.plugin.Gather(acc); err != nil {
return err return err
} }
} }
@ -356,10 +313,145 @@ func (a *Agent) Test() error {
return nil return nil
} }
// writeOutput writes a list of points to a single output, with retries.
// It marks the given WaitGroup as done when it has finished writing.
func (a *Agent) writeOutput(
points []*client.Point,
ro *runningOutput,
shutdown chan struct{},
wg *sync.WaitGroup,
) {
defer wg.Done()
if len(points) == 0 {
return
}
retry := 0
retries := a.FlushRetries
start := time.Now()
for {
err := ro.output.Write(points)
if err == nil {
// Write successful
elapsed := time.Since(start)
log.Printf("Flushed %d metrics to output %s in %s\n",
len(points), ro.name, elapsed)
return
}
select {
case <-shutdown:
return
default:
if retry >= retries {
// No more retries
msg := "FATAL: Write to output [%s] failed %d times, dropping" +
" %d metrics\n"
log.Printf(msg, ro.name, retries+1, len(points))
return
} else if err != nil {
// Sleep for a retry
log.Printf("Error in output [%s]: %s, retrying in %s",
ro.name, err.Error(), a.FlushInterval.Duration)
time.Sleep(a.FlushInterval.Duration)
}
}
retry++
}
}
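// With the defaults set in NewAgent (FlushRetries = 2, FlushInterval = 10s),
// a failing output therefore gets three write attempts in total:
//
//	t=0s   attempt 1 fails -> sleep one flush interval
//	t=10s  attempt 2 fails -> sleep one flush interval
//	t=20s  attempt 3 fails -> "FATAL: ... failed 3 times", points dropped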
// flush writes a list of points to all configured outputs
func (a *Agent) flush(
points []*client.Point,
shutdown chan struct{},
wait bool,
) {
var wg sync.WaitGroup
for _, o := range a.outputs {
wg.Add(1)
go a.writeOutput(points, o, shutdown, &wg)
}
if wait {
wg.Wait()
}
}
// flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 100)
ticker := time.NewTicker(a.FlushInterval.Duration)
points := make([]*client.Point, 0)
for {
select {
case <-shutdown:
log.Println("Hang on, flushing any cached points before shutdown")
a.flush(points, shutdown, true)
return nil
case <-ticker.C:
a.flush(points, shutdown, false)
points = make([]*client.Point, 0)
case pt := <-pointChan:
points = append(points, pt)
}
}
}
// jitterInterval applies the interval jitter to the flush interval using
// the crypto/rand number generator
func jitterInterval(ininterval, injitter time.Duration) time.Duration {
var jitter int64
outinterval := ininterval
if injitter.Nanoseconds() != 0 {
maxjitter := big.NewInt(injitter.Nanoseconds())
if j, err := rand.Int(rand.Reader, maxjitter); err == nil {
jitter = j.Int64()
}
outinterval = time.Duration(jitter + ininterval.Nanoseconds())
}
if outinterval.Nanoseconds() < time.Duration(500*time.Millisecond).Nanoseconds() {
log.Printf("Flush interval %s too low, setting to 500ms\n", outinterval)
outinterval = time.Duration(500 * time.Millisecond)
}
return outinterval
}
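// Example: with FlushInterval = 10s and FlushJitter = 5s, each agent draws
// a flush interval once from [10s, 15s), so a fleet of agents started
// together does not flush to the outputs at the same instant.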
// Run runs the agent daemon, gathering every Interval // Run runs the agent daemon, gathering every Interval
func (a *Agent) Run(shutdown chan struct{}) error { func (a *Agent) Run(shutdown chan struct{}) error {
var wg sync.WaitGroup var wg sync.WaitGroup
a.FlushInterval.Duration = jitterInterval(a.FlushInterval.Duration,
a.FlushJitter.Duration)
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
"Flush Interval:%s\n",
a.Interval, a.Debug, a.Hostname, a.FlushInterval)
// channel shared between all plugin threads for accumulating points
pointChan := make(chan *client.Point, 1000)
// Round collection to nearest interval by sleeping
if a.RoundInterval {
i := int64(a.Interval.Duration)
time.Sleep(time.Duration(i - (time.Now().UnixNano() % i)))
}
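// Example: with Interval = 10s and a start time of 12:00:07.3,
// UnixNano() % i is 7.3s into the current window, so the agent sleeps
// 2.7s and takes its first sample exactly on the boundary at 12:00:10.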
ticker := time.NewTicker(a.Interval.Duration)
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, pointChan); err != nil {
log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
}()
for _, plugin := range a.plugins { for _, plugin := range a.plugins {
// Start service of any ServicePlugins // Start service of any ServicePlugins
@ -374,12 +466,12 @@ func (a *Agent) Run(shutdown chan struct{}) error {
} }
// Special handling for plugins that have their own collection interval // Special handling for plugins that have their own collection interval
// configured. Default intervals are handled below with crankParallel // configured. Default intervals are handled below with gatherParallel
if plugin.config.Interval != 0 { if plugin.config.Interval != 0 {
wg.Add(1) wg.Add(1)
go func(plugin *runningPlugin) { go func(plugin *runningPlugin) {
defer wg.Done() defer wg.Done()
if err := a.crankSeparate(shutdown, plugin); err != nil { if err := a.gatherSeparate(shutdown, plugin, pointChan); err != nil {
log.Printf(err.Error()) log.Printf(err.Error())
} }
}(plugin) }(plugin)
@ -388,10 +480,8 @@ func (a *Agent) Run(shutdown chan struct{}) error {
defer wg.Wait() defer wg.Wait()
ticker := time.NewTicker(a.Interval.Duration)
for { for {
if err := a.crankParallel(); err != nil { if err := a.gatherParallel(pointChan); err != nil {
log.Printf(err.Error()) log.Printf(err.Error())
} }

View File

@ -3,11 +3,14 @@ package telegraf
import (
"github.com/stretchr/testify/assert"
"testing"
+"time"
+"github.com/influxdb/telegraf/duration"
// needing to load the plugins
_ "github.com/influxdb/telegraf/plugins/all"
// needing to load the outputs
-// _ "github.com/influxdb/telegraf/outputs/all"
+_ "github.com/influxdb/telegraf/outputs/all"
)
func TestAgent_LoadPlugin(t *testing.T) {
@ -16,103 +19,136 @@ func TestAgent_LoadPlugin(t *testing.T) {
config, _ := LoadConfig("./testdata/telegraf-agent.toml")
a, _ := NewAgent(config)
-pluginsEnabled, _ := a.LoadPlugins([]string{"mysql"})
+pluginsEnabled, _ := a.LoadPlugins([]string{"mysql"}, config)
assert.Equal(t, 1, len(pluginsEnabled))
-pluginsEnabled, _ = a.LoadPlugins([]string{"foo"})
+pluginsEnabled, _ = a.LoadPlugins([]string{"foo"}, config)
assert.Equal(t, 0, len(pluginsEnabled))
-pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo"})
+pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo"}, config)
assert.Equal(t, 1, len(pluginsEnabled))
-pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "redis"})
+pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "redis"}, config)
assert.Equal(t, 2, len(pluginsEnabled))
-pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo", "redis", "bar"})
+pluginsEnabled, _ = a.LoadPlugins([]string{"mysql", "foo", "redis", "bar"}, config)
assert.Equal(t, 2, len(pluginsEnabled))
}
-// TODO enable these unit tests, currently disabled because of a circular import
-// func TestAgent_LoadOutput(t *testing.T) {
-// // load a dedicated configuration file
-// config, _ := LoadConfig("./testdata/telegraf-agent.toml")
-// a, _ := NewAgent(config)
-// outputsEnabled, _ := a.LoadOutputs([]string{"influxdb"})
-// assert.Equal(t, 1, len(outputsEnabled))
-// outputsEnabled, _ = a.LoadOutputs([]string{})
-// assert.Equal(t, 2, len(outputsEnabled))
-// outputsEnabled, _ = a.LoadOutputs([]string{"foo"})
-// assert.Equal(t, 0, len(outputsEnabled))
-// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo"})
-// assert.Equal(t, 1, len(outputsEnabled))
-// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "kafka"})
-// assert.Equal(t, 2, len(outputsEnabled))
-// outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo", "kafka", "bar"})
-// assert.Equal(t, 2, len(outputsEnabled))
-// }
+func TestAgent_LoadOutput(t *testing.T) {
+// load a dedicated configuration file
+config, _ := LoadConfig("./testdata/telegraf-agent.toml")
+a, _ := NewAgent(config)
+outputsEnabled, _ := a.LoadOutputs([]string{"influxdb"}, config)
+assert.Equal(t, 1, len(outputsEnabled))
+outputsEnabled, _ = a.LoadOutputs([]string{}, config)
+assert.Equal(t, 2, len(outputsEnabled))
+outputsEnabled, _ = a.LoadOutputs([]string{"foo"}, config)
+assert.Equal(t, 0, len(outputsEnabled))
+outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo"}, config)
+assert.Equal(t, 1, len(outputsEnabled))
+outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "kafka"}, config)
+assert.Equal(t, 2, len(outputsEnabled))
+outputsEnabled, _ = a.LoadOutputs([]string{"influxdb", "foo", "kafka", "bar"}, config)
+assert.Equal(t, 2, len(outputsEnabled))
+}
/*
func TestAgent_DrivesMetrics(t *testing.T) {
var (
plugin plugins.MockPlugin
)
defer plugin.AssertExpectations(t)
defer metrics.AssertExpectations(t)
a := &Agent{
plugins: []plugins.Plugin{&plugin},
Config: &Config{},
}
plugin.On("Add", "foo", 1.2, nil).Return(nil)
plugin.On("Add", "bar", 888, nil).Return(nil)
err := a.crank()
require.NoError(t, err)
} }
func TestAgent_AppliesTags(t *testing.T) { func TestAgent_ZeroJitter(t *testing.T) {
var (
plugin plugins.MockPlugin
metrics MockMetrics
)
defer plugin.AssertExpectations(t)
defer metrics.AssertExpectations(t)
a := &Agent{ a := &Agent{
plugins: []plugins.Plugin{&plugin}, FlushInterval: duration.Duration{10 * time.Second},
metrics: &metrics, FlushJitter: duration.Duration{0 * time.Second},
Config: &Config{ }
Tags: map[string]string{ flushinterval := jitterInterval(a.FlushInterval.Duration,
"dc": "us-west-1", a.FlushJitter.Duration)
},
}, actual := flushinterval.Nanoseconds()
exp := time.Duration(10 * time.Second).Nanoseconds()
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_ZeroInterval(t *testing.T) {
min := time.Duration(500 * time.Millisecond).Nanoseconds()
max := time.Duration(5 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
a := &Agent{
FlushInterval: duration.Duration{0 * time.Second},
FlushJitter: duration.Duration{5 * time.Second},
} }
m1 := cypress.Metric() flushinterval := jitterInterval(a.FlushInterval.Duration,
m1.Add("name", "foo") a.FlushJitter.Duration)
m1.Add("value", 1.2) actual := flushinterval.Nanoseconds()
msgs := []*cypress.Message{m1} if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
m2 := cypress.Metric() break
m2.Timestamp = m1.Timestamp }
m2.Add("name", "foo") if actual < min {
m2.Add("value", 1.2) t.Errorf("Didn't expect interval %d to be < %d", actual, min)
m2.AddTag("dc", "us-west-1") break
}
plugin.On("Read").Return(msgs, nil) }
metrics.On("Receive", m2).Return(nil) }
err := a.crank() func TestAgent_ZeroBoth(t *testing.T) {
require.NoError(t, err) a := &Agent{
FlushInterval: duration.Duration{0 * time.Second},
FlushJitter: duration.Duration{0 * time.Second},
}
flushinterval := jitterInterval(a.FlushInterval.Duration,
a.FlushJitter.Duration)
actual := flushinterval
exp := time.Duration(500 * time.Millisecond)
if actual != exp {
t.Errorf("Actual %v, expected %v", actual, exp)
}
}
func TestAgent_JitterMax(t *testing.T) {
max := time.Duration(32 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
a := &Agent{
FlushInterval: duration.Duration{30 * time.Second},
FlushJitter: duration.Duration{2 * time.Second},
}
flushinterval := jitterInterval(a.FlushInterval.Duration,
a.FlushJitter.Duration)
actual := flushinterval.Nanoseconds()
if actual > max {
t.Errorf("Didn't expect interval %d to be > %d", actual, max)
break
}
}
}
func TestAgent_JitterMin(t *testing.T) {
min := time.Duration(30 * time.Second).Nanoseconds()
for i := 0; i < 1000; i++ {
a := &Agent{
FlushInterval: duration.Duration{30 * time.Second},
FlushJitter: duration.Duration{2 * time.Second},
}
flushinterval := jitterInterval(a.FlushInterval.Duration,
a.FlushJitter.Duration)
actual := flushinterval.Nanoseconds()
if actual < min {
t.Errorf("Didn't expect interval %d to be < %d", actual, min)
break
}
}
} }
*/

View File

@ -17,6 +17,8 @@ var fDebug = flag.Bool("debug", false,
"show metrics as they're generated to stdout") "show metrics as they're generated to stdout")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
var fConfig = flag.String("config", "", "configuration file to load") var fConfig = flag.String("config", "", "configuration file to load")
var fConfigDirectory = flag.String("configdirectory", "",
"directory containing additional configuration files")
var fVersion = flag.Bool("version", false, "display the version") var fVersion = flag.Bool("version", false, "display the version")
var fSampleConfig = flag.Bool("sample-config", false, var fSampleConfig = flag.Bool("sample-config", false,
"print out full sample configuration") "print out full sample configuration")
@ -60,7 +62,9 @@ func main() {
if *fUsage != "" { if *fUsage != "" {
if err := telegraf.PrintPluginConfig(*fUsage); err != nil { if err := telegraf.PrintPluginConfig(*fUsage); err != nil {
log.Fatal(err) if err2 := telegraf.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("%s and %s", err, err2)
}
} }
return return
} }
@ -81,6 +85,13 @@ func main() {
return return
} }
if *fConfigDirectory != "" {
err = config.LoadDirectory(*fConfigDirectory)
if err != nil {
log.Fatal(err)
}
}
ag, err := telegraf.NewAgent(config) ag, err := telegraf.NewAgent(config)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -90,7 +101,7 @@ func main() {
ag.Debug = true ag.Debug = true
} }
outputs, err := ag.LoadOutputs(outputFilters) outputs, err := ag.LoadOutputs(outputFilters, config)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -99,7 +110,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
plugins, err := ag.LoadPlugins(pluginFilters) plugins, err := ag.LoadPlugins(pluginFilters, config)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -132,9 +143,6 @@ func main() {
log.Printf("Starting Telegraf (version %s)\n", Version) log.Printf("Starting Telegraf (version %s)\n", Version)
log.Printf("Loaded outputs: %s", strings.Join(outputs, " ")) log.Printf("Loaded outputs: %s", strings.Join(outputs, " "))
log.Printf("Loaded plugins: %s", strings.Join(plugins, " ")) log.Printf("Loaded plugins: %s", strings.Join(plugins, " "))
log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+
"Precision:%#v, UTC: %#v\n",
ag.Interval, ag.Debug, ag.Hostname, ag.Precision, ag.UTC)
log.Printf("Tags enabled: %s", config.ListTags()) log.Printf("Tags enabled: %s", config.ListTags())
if *fPidfile != "" { if *fPidfile != "" {

565
config.go
View File

@ -4,6 +4,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path/filepath"
"reflect"
"sort" "sort"
"strings" "strings"
"time" "time"
@ -14,41 +16,32 @@ import (
"github.com/naoina/toml/ast" "github.com/naoina/toml/ast"
) )
// Duration just wraps time.Duration
type Duration struct {
time.Duration
}
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
if err != nil {
return err
}
d.Duration = dur
return nil
}
// Config specifies the URL/user/password for the database that telegraf // Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has // will be logging to, as well as all the plugins that the user has
// specified // specified
type Config struct { type Config struct {
// This lives outside the agent because mergeStruct doesn't need to handle maps normally.
// We just copy the elements manually in ApplyAgent.
Tags map[string]string Tags map[string]string
agent *ast.Table agent *Agent
plugins map[string]*ast.Table plugins map[string]plugins.Plugin
outputs map[string]*ast.Table pluginConfigurations map[string]*ConfiguredPlugin
outputs map[string]outputs.Output
agentFieldsSet []string
pluginFieldsSet map[string][]string
pluginConfigurationFieldsSet map[string][]string
outputFieldsSet map[string][]string
} }
// Plugins returns the configured plugins as a map of name -> plugin toml // Plugins returns the configured plugins as a map of name -> plugins.Plugin
func (c *Config) Plugins() map[string]*ast.Table { func (c *Config) Plugins() map[string]plugins.Plugin {
return c.plugins return c.plugins
} }
// Outputs returns the configured outputs as a map of name -> output toml // Outputs returns the configured outputs as a map of name -> outputs.Output
func (c *Config) Outputs() map[string]*ast.Table { func (c *Config) Outputs() map[string]outputs.Output {
return c.outputs return c.outputs
} }
@ -123,187 +116,65 @@ func (cp *ConfiguredPlugin) ShouldPass(measurement string, tags map[string]strin
return true return true
} }
// ApplyOutput loads the toml config into the given interface // ApplyOutput loads the Output struct built from the config into the given Output struct.
// Overrides only values in the given struct that were set in the config.
func (c *Config) ApplyOutput(name string, v interface{}) error { func (c *Config) ApplyOutput(name string, v interface{}) error {
if c.outputs[name] != nil { if c.outputs[name] != nil {
return toml.UnmarshalTable(c.outputs[name], v) return mergeStruct(v, c.outputs[name], c.outputFieldsSet[name])
} }
return nil return nil
} }
// ApplyAgent loads the toml config into the given Agent object, overriding // ApplyAgent loads the Agent struct built from the config into the given Agent struct.
// defaults (such as collection duration) with the values from the toml config. // Overrides only values in the given struct that were set in the config.
func (c *Config) ApplyAgent(a *Agent) error { func (c *Config) ApplyAgent(a *Agent) error {
if c.agent != nil { if c.agent != nil {
return toml.UnmarshalTable(c.agent, a) for key, value := range c.Tags {
a.Tags[key] = value
}
return mergeStruct(a, c.agent, c.agentFieldsSet)
} }
return nil return nil
} }
-// ApplyPlugin takes defined plugin names and applies them to the given
-// interface, returning a ConfiguredPlugin object in the end that can
-// be inserted into a runningPlugin by the agent.
+// ApplyPlugin loads the Plugin struct built from the config into the given Plugin struct.
+// Overrides only values in the given struct that were set in the config.
+// Additionally return a ConfiguredPlugin, which is always generated from the config.
 func (c *Config) ApplyPlugin(name string, v interface{}) (*ConfiguredPlugin, error) {
-	cp := &ConfiguredPlugin{Name: name}
-
-	if tbl, ok := c.plugins[name]; ok {
-
-		if node, ok := tbl.Fields["pass"]; ok {
-			if kv, ok := node.(*ast.KeyValue); ok {
-				if ary, ok := kv.Value.(*ast.Array); ok {
-					for _, elem := range ary.Value {
-						if str, ok := elem.(*ast.String); ok {
-							cp.Pass = append(cp.Pass, str.Value)
-						}
-					}
-				}
-			}
-		}
-
-		if node, ok := tbl.Fields["drop"]; ok {
-			if kv, ok := node.(*ast.KeyValue); ok {
-				if ary, ok := kv.Value.(*ast.Array); ok {
-					for _, elem := range ary.Value {
-						if str, ok := elem.(*ast.String); ok {
-							cp.Drop = append(cp.Drop, str.Value)
-						}
-					}
-				}
-			}
-		}
-
-		if node, ok := tbl.Fields["interval"]; ok {
-			if kv, ok := node.(*ast.KeyValue); ok {
-				if str, ok := kv.Value.(*ast.String); ok {
-					dur, err := time.ParseDuration(str.Value)
-					if err != nil {
-						return nil, err
-					}
-
-					cp.Interval = dur
-				}
-			}
-		}
-
-		if node, ok := tbl.Fields["tagpass"]; ok {
-			if subtbl, ok := node.(*ast.Table); ok {
-				for name, val := range subtbl.Fields {
-					if kv, ok := val.(*ast.KeyValue); ok {
-						tagfilter := &TagFilter{Name: name}
-						if ary, ok := kv.Value.(*ast.Array); ok {
-							for _, elem := range ary.Value {
-								if str, ok := elem.(*ast.String); ok {
-									tagfilter.Filter = append(tagfilter.Filter, str.Value)
-								}
-							}
-						}
-						cp.TagPass = append(cp.TagPass, *tagfilter)
-					}
-				}
-			}
-		}
-
-		if node, ok := tbl.Fields["tagdrop"]; ok {
-			if subtbl, ok := node.(*ast.Table); ok {
-				for name, val := range subtbl.Fields {
-					if kv, ok := val.(*ast.KeyValue); ok {
-						tagfilter := &TagFilter{Name: name}
-						if ary, ok := kv.Value.(*ast.Array); ok {
-							for _, elem := range ary.Value {
-								if str, ok := elem.(*ast.String); ok {
-									tagfilter.Filter = append(tagfilter.Filter, str.Value)
-								}
-							}
-						}
-						cp.TagDrop = append(cp.TagDrop, *tagfilter)
-					}
-				}
-			}
-		}
-
-		delete(tbl.Fields, "drop")
-		delete(tbl.Fields, "pass")
-		delete(tbl.Fields, "interval")
-		delete(tbl.Fields, "tagdrop")
-		delete(tbl.Fields, "tagpass")
-		return cp, toml.UnmarshalTable(tbl, v)
-	}
-
-	return cp, nil
+	if c.plugins[name] != nil {
+		err := mergeStruct(v, c.plugins[name], c.pluginFieldsSet[name])
+		if err != nil {
+			return nil, err
+		}
+		return c.pluginConfigurations[name], nil
+	}
+
+	return nil, nil
 }
+// Couldn't figure out how to get this to work with the declared function.
+
 // PluginsDeclared returns the name of all plugins declared in the config.
 func (c *Config) PluginsDeclared() []string {
-	return declared(c.plugins)
+	var names []string
+
+	for name := range c.plugins {
+		names = append(names, name)
+	}
+
+	sort.Strings(names)
+
+	return names
 }

 // OutputsDeclared returns the name of all outputs declared in the config.
 func (c *Config) OutputsDeclared() []string {
-	return declared(c.outputs)
-}
-
-func declared(endpoints map[string]*ast.Table) []string {
 	var names []string
-	for name := range endpoints {
+
+	for name := range c.outputs {
 		names = append(names, name)
 	}
+
 	sort.Strings(names)
+
 	return names
 }
-var errInvalidConfig = errors.New("invalid configuration")
-
-// LoadConfig loads the given config file and returns a *Config pointer
-func LoadConfig(path string) (*Config, error) {
-	data, err := ioutil.ReadFile(path)
-	if err != nil {
-		return nil, err
-	}
-
-	tbl, err := toml.Parse(data)
-	if err != nil {
-		return nil, err
-	}
-
-	c := &Config{
-		Tags:    make(map[string]string),
-		plugins: make(map[string]*ast.Table),
-		outputs: make(map[string]*ast.Table),
-	}
-
-	for name, val := range tbl.Fields {
-		subtbl, ok := val.(*ast.Table)
-		if !ok {
-			return nil, errInvalidConfig
-		}
-
-		switch name {
-		case "agent":
-			c.agent = subtbl
-		case "tags":
-			if err := toml.UnmarshalTable(subtbl, c.Tags); err != nil {
-				return nil, errInvalidConfig
-			}
-		case "outputs":
-			for outputName, outputVal := range subtbl.Fields {
-				outputSubtbl, ok := outputVal.(*ast.Table)
-				if !ok {
-					return nil, errInvalidConfig
-				}
-				c.outputs[outputName] = outputSubtbl
-			}
-		default:
-			c.plugins[name] = subtbl
-		}
-	}
-
-	return c, nil
-}
 // ListTags returns a string of tags specified in the config,
 // line-protocol style
 func (c *Config) ListTags() string {

@@ -355,12 +226,19 @@ var header = `# Telegraf configuration
 [agent]
 # Default data collection interval for all plugins
 interval = "10s"
+# Rounds collection interval to 'interval'
+# ie, if interval="10s" then always collect on :00, :10, :20, etc.
+round_interval = true

-# If utc = false, uses local time (utc is highly recommended)
-utc = true
+# Default data flushing interval for all outputs. You should not set this below
+# interval. Maximum flush_interval will be flush_interval + flush_jitter
+flush_interval = "10s"

-# Precision of writes, valid values are n, u, ms, s, m, and h
-# note: using second precision greatly helps InfluxDB compression
-precision = "s"
+# Jitter the flush interval by a random amount. This is primarily to avoid
+# large write spikes for users running a large number of telegraf instances.
+# ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+flush_jitter = "0s"

-# run telegraf in debug mode
+# Run telegraf in debug mode
 debug = false

 # Override default hostname, if empty use os.Hostname()
 hostname = ""
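As an editorial aside, a minimal sketch of how an agent can derive the two timers described in the comments above. Everything here is invented for illustration (function and variable names are not from this commit); only the arithmetic mirrors the documented round_interval and flush_jitter behavior:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRoundedCollection rounds the first collection up to an even
// interval boundary, as described for round_interval above.
func nextRoundedCollection(now time.Time, interval time.Duration) time.Time {
	return now.Truncate(interval).Add(interval)
}

// jitteredFlush returns flush_interval plus a random amount in
// [0, flush_jitter), so a fleet of agents does not flush in lockstep.
func jitteredFlush(flush, jitter time.Duration) time.Duration {
	if jitter <= 0 {
		return flush
	}
	return flush + time.Duration(rand.Int63n(int64(jitter)))
}

func main() {
	now := time.Date(2015, 10, 27, 17, 33, 28, 0, time.UTC)
	fmt.Println(nextRoundedCollection(now, 10*time.Second)) // ...17:33:30Z
	fmt.Println(jitteredFlush(10*time.Second, 5*time.Second)) // 10s to 15s
}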
@@ -447,9 +325,14 @@ func PrintSampleConfig(pluginFilters []string, outputFilters []string) {
 	}
 }

-func printConfig(name string, plugin plugins.Plugin) {
-	fmt.Printf("\n# %s\n[%s]", plugin.Description(), name)
-	config := plugin.SampleConfig()
+type printer interface {
+	Description() string
+	SampleConfig() string
+}
+
+func printConfig(name string, p printer) {
+	fmt.Printf("\n# %s\n[%s]", p.Description(), name)
+	config := p.SampleConfig()
 	if config == "" {
 		fmt.Printf("\n    # no configuration\n")
 	} else {

@@ -475,3 +358,325 @@ func PrintPluginConfig(name string) error {
 	}
 	return nil
 }
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator())
} else {
return errors.New(fmt.Sprintf("Output %s not found", name))
}
return nil
}
// Used for fuzzy matching struct field names in FieldByNameFunc calls below
func fieldMatch(field string) func(string) bool {
return func(name string) bool {
r := strings.NewReplacer("_", "")
return strings.ToLower(name) == strings.ToLower(r.Replace(field))
}
}
// A very limited merge. Merges the fields named in the fields parameter, replacing most values, but appending to arrays.
func mergeStruct(base, overlay interface{}, fields []string) error {
baseValue := reflect.ValueOf(base).Elem()
overlayValue := reflect.ValueOf(overlay).Elem()
if baseValue.Kind() != reflect.Struct {
return fmt.Errorf("Tried to merge something that wasn't a struct: type %v was %v", baseValue.Type(), baseValue.Kind())
}
if baseValue.Type() != overlayValue.Type() {
return fmt.Errorf("Tried to merge two different types: %v and %v", baseValue.Type(), overlayValue.Type())
}
for _, field := range fields {
overlayFieldValue := overlayValue.FieldByNameFunc(fieldMatch(field))
if !overlayFieldValue.IsValid() {
return fmt.Errorf("could not find field in %v matching %v", overlayValue.Type(), field)
}
if overlayFieldValue.Kind() == reflect.Slice {
baseFieldValue := baseValue.FieldByNameFunc(fieldMatch(field))
baseFieldValue.Set(reflect.AppendSlice(baseFieldValue, overlayFieldValue))
} else {
baseValue.FieldByNameFunc(fieldMatch(field)).Set(overlayFieldValue)
}
}
return nil
}
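To make the merge semantics concrete, a hypothetical example (the type and values are invented; the fields list uses the snake_case names that fieldMatch resolves against struct field names):

package main

import "fmt"

type conf struct {
	Host string
	Tags []string
}

func main() {
	base := &conf{Host: "a", Tags: []string{"x"}}
	overlay := &conf{Host: "b", Tags: []string{"y"}}
	// mergeStruct(base, overlay, []string{"host", "tags"}) would leave:
	//   base.Host == "b"           (non-slice fields are overwritten)
	//   base.Tags == ["x" "y"]     (slice fields are appended to, not replaced)
	fmt.Println(base, overlay)
}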
func (c *Config) LoadDirectory(path string) error {
directoryEntries, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, entry := range directoryEntries {
if entry.IsDir() {
continue
}
name := entry.Name()
if name[len(name)-5:] != ".conf" {
continue
}
subConfig, err := LoadConfig(filepath.Join(path, name))
if err != nil {
return err
}
if subConfig.agent != nil {
err = mergeStruct(c.agent, subConfig.agent, subConfig.agentFieldsSet)
if err != nil {
return err
}
for _, field := range subConfig.agentFieldsSet {
if !sliceContains(field, c.agentFieldsSet) {
c.agentFieldsSet = append(c.agentFieldsSet, field)
}
}
}
for pluginName, plugin := range subConfig.plugins {
if _, ok := c.plugins[pluginName]; !ok {
c.plugins[pluginName] = plugin
c.pluginFieldsSet[pluginName] = subConfig.pluginFieldsSet[pluginName]
c.pluginConfigurations[pluginName] = subConfig.pluginConfigurations[pluginName]
c.pluginConfigurationFieldsSet[pluginName] = subConfig.pluginConfigurationFieldsSet[pluginName]
continue
}
err = mergeStruct(c.plugins[pluginName], plugin, subConfig.pluginFieldsSet[pluginName])
if err != nil {
return err
}
for _, field := range subConfig.pluginFieldsSet[pluginName] {
if !sliceContains(field, c.pluginFieldsSet[pluginName]) {
c.pluginFieldsSet[pluginName] = append(c.pluginFieldsSet[pluginName], field)
}
}
err = mergeStruct(c.pluginConfigurations[pluginName], subConfig.pluginConfigurations[pluginName], subConfig.pluginConfigurationFieldsSet[pluginName])
if err != nil {
return err
}
for _, field := range subConfig.pluginConfigurationFieldsSet[pluginName] {
if !sliceContains(field, c.pluginConfigurationFieldsSet[pluginName]) {
c.pluginConfigurationFieldsSet[pluginName] = append(c.pluginConfigurationFieldsSet[pluginName], field)
}
}
}
for outputName, output := range subConfig.outputs {
if _, ok := c.outputs[outputName]; !ok {
c.outputs[outputName] = output
c.outputFieldsSet[outputName] = subConfig.outputFieldsSet[outputName]
continue
}
err = mergeStruct(c.outputs[outputName], output, subConfig.outputFieldsSet[outputName])
if err != nil {
return err
}
for _, field := range subConfig.outputFieldsSet[outputName] {
if !sliceContains(field, c.outputFieldsSet[outputName]) {
c.outputFieldsSet[outputName] = append(c.outputFieldsSet[outputName], field)
}
}
}
}
return nil
}
// hazmat area. Keeping the ast parsing here.
// LoadConfig loads the given config file and returns a *Config pointer
func LoadConfig(path string) (*Config, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
tbl, err := toml.Parse(data)
if err != nil {
return nil, err
}
c := &Config{
Tags: make(map[string]string),
plugins: make(map[string]plugins.Plugin),
pluginConfigurations: make(map[string]*ConfiguredPlugin),
outputs: make(map[string]outputs.Output),
pluginFieldsSet: make(map[string][]string),
pluginConfigurationFieldsSet: make(map[string][]string),
outputFieldsSet: make(map[string][]string),
}
for name, val := range tbl.Fields {
subtbl, ok := val.(*ast.Table)
if !ok {
return nil, errors.New("invalid configuration")
}
switch name {
case "agent":
err := c.parseAgent(subtbl)
if err != nil {
return nil, err
}
case "tags":
if err = toml.UnmarshalTable(subtbl, c.Tags); err != nil {
return nil, err
}
case "outputs":
for outputName, outputVal := range subtbl.Fields {
outputSubtbl, ok := outputVal.(*ast.Table)
if !ok {
return nil, err
}
err = c.parseOutput(outputName, outputSubtbl)
if err != nil {
return nil, err
}
}
default:
err = c.parsePlugin(name, subtbl)
if err != nil {
return nil, err
}
}
}
return c, nil
}
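For orientation, an illustrative sketch (not a file from this commit; plugin and output names follow the testdata referenced in config_test.go below, and the field names are assumptions) of the top-level table layout this parser expects. [agent] and [tags] are handled specially, [outputs.*] tables go through parseOutput, and any other top-level table is treated as a plugin name for parsePlugin:

[agent]
interval = "10s"

[tags]
dc = "us-east-1"

[outputs]
[outputs.influxdb]
urls = ["http://localhost:8086"]

# any other top-level table is treated as a plugin
[kafka]
topic = "topic_with_metrics"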
// Needs to have the field names, for merging later.
func extractFieldNames(ast *ast.Table) []string {
// A reasonable capacity?
var names []string
for name := range ast.Fields {
names = append(names, name)
}
return names
}
// Parse the agent config out of the given *ast.Table.
func (c *Config) parseAgent(agentAst *ast.Table) error {
c.agentFieldsSet = extractFieldNames(agentAst)
agent := &Agent{}
err := toml.UnmarshalTable(agentAst, agent)
if err != nil {
return err
}
c.agent = agent
return nil
}
// Parse an output config out of the given *ast.Table.
func (c *Config) parseOutput(name string, outputAst *ast.Table) error {
c.outputFieldsSet[name] = extractFieldNames(outputAst)
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
err := toml.UnmarshalTable(outputAst, output)
if err != nil {
return err
}
c.outputs[name] = output
return nil
}
// Parse a plugin config, plus plugin meta-config, out of the given *ast.Table.
func (c *Config) parsePlugin(name string, pluginAst *ast.Table) error {
creator, ok := plugins.Plugins[name]
if !ok {
return fmt.Errorf("Undefined but requested plugin: %s", name)
}
plugin := creator()
cp := &ConfiguredPlugin{Name: name}
cpFields := make([]string, 0, 5)
if node, ok := pluginAst.Fields["pass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Pass = append(cp.Pass, str.Value)
}
}
cpFields = append(cpFields, "pass")
}
}
}
if node, ok := pluginAst.Fields["drop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
cp.Drop = append(cp.Drop, str.Value)
}
}
cpFields = append(cpFields, "drop")
}
}
}
if node, ok := pluginAst.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return err
}
cp.Interval = dur
cpFields = append(cpFields, "interval")
}
}
}
if node, ok := pluginAst.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
cp.TagPass = append(cp.TagPass, *tagfilter)
cpFields = append(cpFields, "tagpass")
}
}
}
}
if node, ok := pluginAst.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
cp.TagDrop = append(cp.TagDrop, *tagfilter)
cpFields = append(cpFields, "tagdrop")
}
}
}
}
delete(pluginAst.Fields, "drop")
delete(pluginAst.Fields, "pass")
delete(pluginAst.Fields, "interval")
delete(pluginAst.Fields, "tagdrop")
delete(pluginAst.Fields, "tagpass")
c.pluginFieldsSet[name] = extractFieldNames(pluginAst)
c.pluginConfigurationFieldsSet[name] = cpFields
err := toml.UnmarshalTable(pluginAst, plugin)
if err != nil {
return err
}
c.plugins[name] = plugin
c.pluginConfigurations[name] = cp
return nil
}
config_test.go (new file)

@@ -0,0 +1,332 @@
package telegraf
import (
"fmt"
"io/ioutil"
"testing"
"time"
"github.com/influxdb/telegraf/plugins"
"github.com/influxdb/telegraf/plugins/exec"
"github.com/influxdb/telegraf/plugins/kafka_consumer"
"github.com/influxdb/telegraf/plugins/procstat"
"github.com/naoina/toml"
"github.com/naoina/toml/ast"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
func TestConfig_fieldMatch(t *testing.T) {
assert := assert.New(t)
matchFunc := fieldMatch("testfield")
assert.True(matchFunc("testField"), "testfield should match testField")
assert.True(matchFunc("TestField"), "testfield should match TestField")
assert.True(matchFunc("TESTFIELD"), "testfield should match TESTFIELD")
assert.False(matchFunc("OtherField"), "testfield should not match OtherField")
matchFunc = fieldMatch("test_field")
assert.True(matchFunc("testField"), "test_field should match testField")
assert.True(matchFunc("TestField"), "test_field should match TestField")
assert.True(matchFunc("TESTFIELD"), "test_field should match TESTFIELD")
assert.False(matchFunc("OtherField"), "test_field should not match OtherField")
}
type subTest struct {
AField string
AnotherField int
}
type test struct {
StringField string
IntegerField int
FloatField float32
BooleanField bool
DatetimeField time.Time
ArrayField []string
TableArrayField []subTest
}
type MergeStructSuite struct {
suite.Suite
EmptyStruct *test
FullStruct *test
AnotherFullStruct *test
AllFields []string
}
func (s *MergeStructSuite) SetupSuite() {
s.AllFields = []string{"string_field", "integer_field", "float_field", "boolean_field", "date_time_field", "array_field", "table_array_field"}
}
func (s *MergeStructSuite) SetupTest() {
s.EmptyStruct = &test{
ArrayField: []string{},
TableArrayField: []subTest{},
}
s.FullStruct = &test{
StringField: "one",
IntegerField: 1,
FloatField: 1.1,
BooleanField: false,
DatetimeField: time.Date(1963, time.August, 28, 17, 0, 0, 0, time.UTC),
ArrayField: []string{"one", "two", "three"},
TableArrayField: []subTest{
subTest{
AField: "one",
AnotherField: 1,
},
subTest{
AField: "two",
AnotherField: 2,
},
},
}
s.AnotherFullStruct = &test{
StringField: "two",
IntegerField: 2,
FloatField: 2.2,
BooleanField: true,
DatetimeField: time.Date(1965, time.March, 25, 17, 0, 0, 0, time.UTC),
ArrayField: []string{"four", "five", "six"},
TableArrayField: []subTest{
subTest{
AField: "three",
AnotherField: 3,
},
subTest{
AField: "four",
AnotherField: 4,
},
},
}
}
func (s *MergeStructSuite) TestEmptyMerge() {
err := mergeStruct(s.EmptyStruct, s.FullStruct, s.AllFields)
if err != nil {
s.T().Error(err)
}
s.Equal(s.FullStruct, s.EmptyStruct, fmt.Sprintf("Full merge of %v onto an empty struct failed.", s.FullStruct))
}
func (s *MergeStructSuite) TestFullMerge() {
result := &test{
StringField: "two",
IntegerField: 2,
FloatField: 2.2,
BooleanField: true,
DatetimeField: time.Date(1965, time.March, 25, 17, 0, 0, 0, time.UTC),
ArrayField: []string{"one", "two", "three", "four", "five", "six"},
TableArrayField: []subTest{
subTest{
AField: "one",
AnotherField: 1,
},
subTest{
AField: "two",
AnotherField: 2,
},
subTest{
AField: "three",
AnotherField: 3,
},
subTest{
AField: "four",
AnotherField: 4,
},
},
}
err := mergeStruct(s.FullStruct, s.AnotherFullStruct, s.AllFields)
if err != nil {
s.T().Error(err)
}
s.Equal(result, s.FullStruct, fmt.Sprintf("Full merge of %v onto FullStruct failed.", s.AnotherFullStruct))
}
func (s *MergeStructSuite) TestPartialMergeWithoutSlices() {
result := &test{
StringField: "two",
IntegerField: 1,
FloatField: 2.2,
BooleanField: false,
DatetimeField: time.Date(1965, time.March, 25, 17, 0, 0, 0, time.UTC),
ArrayField: []string{"one", "two", "three"},
TableArrayField: []subTest{
subTest{
AField: "one",
AnotherField: 1,
},
subTest{
AField: "two",
AnotherField: 2,
},
},
}
err := mergeStruct(s.FullStruct, s.AnotherFullStruct, []string{"string_field", "float_field", "date_time_field"})
if err != nil {
s.T().Error(err)
}
s.Equal(result, s.FullStruct, fmt.Sprintf("Partial merge without slices of %v onto FullStruct failed.", s.AnotherFullStruct))
}
func (s *MergeStructSuite) TestPartialMergeWithSlices() {
result := &test{
StringField: "two",
IntegerField: 1,
FloatField: 2.2,
BooleanField: false,
DatetimeField: time.Date(1965, time.March, 25, 17, 0, 0, 0, time.UTC),
ArrayField: []string{"one", "two", "three"},
TableArrayField: []subTest{
subTest{
AField: "one",
AnotherField: 1,
},
subTest{
AField: "two",
AnotherField: 2,
},
subTest{
AField: "three",
AnotherField: 3,
},
subTest{
AField: "four",
AnotherField: 4,
},
},
}
err := mergeStruct(s.FullStruct, s.AnotherFullStruct, []string{"string_field", "float_field", "date_time_field", "table_array_field"})
if err != nil {
s.T().Error(err)
}
s.Equal(result, s.FullStruct, fmt.Sprintf("Partial merge with slices of %v onto FullStruct failed.", s.AnotherFullStruct))
}
func TestConfig_mergeStruct(t *testing.T) {
suite.Run(t, new(MergeStructSuite))
}
func TestConfig_parsePlugin(t *testing.T) {
data, err := ioutil.ReadFile("./testdata/single_plugin.toml")
if err != nil {
t.Error(err)
}
tbl, err := toml.Parse(data)
if err != nil {
t.Error(err)
}
c := &Config{
plugins: make(map[string]plugins.Plugin),
pluginConfigurations: make(map[string]*ConfiguredPlugin),
pluginFieldsSet: make(map[string][]string),
pluginConfigurationFieldsSet: make(map[string][]string),
}
subtbl := tbl.Fields["kafka"].(*ast.Table)
err = c.parsePlugin("kafka", subtbl)
kafka := plugins.Plugins["kafka"]().(*kafka_consumer.Kafka)
kafka.ConsumerGroupName = "telegraf_metrics_consumers"
kafka.Topic = "topic_with_metrics"
kafka.ZookeeperPeers = []string{"test.example.com:2181"}
kafka.BatchSize = 1000
kConfig := &ConfiguredPlugin{
Name: "kafka",
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
Interval: 5 * time.Second,
}
assert.Equal(t, kafka, c.plugins["kafka"], "Testdata did not produce a correct kafka struct.")
assert.Equal(t, kConfig, c.pluginConfigurations["kafka"], "Testdata did not produce correct kafka metadata.")
}
func TestConfig_LoadDirectory(t *testing.T) {
c, err := LoadConfig("./testdata/telegraf-agent.toml")
if err != nil {
t.Error(err)
}
err = c.LoadDirectory("./testdata/subconfig")
if err != nil {
t.Error(err)
}
kafka := plugins.Plugins["kafka"]().(*kafka_consumer.Kafka)
kafka.ConsumerGroupName = "telegraf_metrics_consumers"
kafka.Topic = "topic_with_metrics"
kafka.ZookeeperPeers = []string{"localhost:2181", "test.example.com:2181"}
kafka.BatchSize = 10000
kConfig := &ConfiguredPlugin{
Name: "kafka",
Drop: []string{"other", "stuff"},
Pass: []string{"some", "strings"},
TagDrop: []TagFilter{
TagFilter{
Name: "badtag",
Filter: []string{"othertag"},
},
},
TagPass: []TagFilter{
TagFilter{
Name: "goodtag",
Filter: []string{"mytag"},
},
},
Interval: 5 * time.Second,
}
ex := plugins.Plugins["exec"]().(*exec.Exec)
ex.Commands = []*exec.Command{
&exec.Command{
Command: "/usr/bin/mycollector --foo=bar",
Name: "mycollector",
},
&exec.Command{
Command: "/usr/bin/myothercollector --foo=bar",
Name: "myothercollector",
},
}
eConfig := &ConfiguredPlugin{Name: "exec"}
pstat := plugins.Plugins["procstat"]().(*procstat.Procstat)
pstat.Specifications = []*procstat.Specification{
&procstat.Specification{
PidFile: "/var/run/grafana-server.pid",
},
&procstat.Specification{
PidFile: "/var/run/influxdb/influxd.pid",
},
}
pConfig := &ConfiguredPlugin{Name: "procstat"}
assert.Equal(t, kafka, c.plugins["kafka"], "Merged Testdata did not produce a correct kafka struct.")
assert.Equal(t, kConfig, c.pluginConfigurations["kafka"], "Merged Testdata did not produce correct kafka metadata.")
assert.Equal(t, ex, c.plugins["exec"], "Merged Testdata did not produce a correct exec struct.")
assert.Equal(t, eConfig, c.pluginConfigurations["exec"], "Merged Testdata did not produce correct exec metadata.")
assert.Equal(t, pstat, c.plugins["procstat"], "Merged Testdata did not produce a correct procstat struct.")
assert.Equal(t, pConfig, c.pluginConfigurations["procstat"], "Merged Testdata did not produce correct procstat metadata.")
}
duration/duration.go (new file)

@@ -0,0 +1,20 @@
package duration
import "time"
// Duration just wraps time.Duration
type Duration struct {
time.Duration
}
// UnmarshalTOML parses the duration from the TOML config file
func (d *Duration) UnmarshalTOML(b []byte) error {
dur, err := time.ParseDuration(string(b[1 : len(b)-1]))
if err != nil {
return err
}
d.Duration = dur
return nil
}
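As a reading aid (not part of the commit), a minimal usage sketch of this wrapper. The TOML decoder hands UnmarshalTOML the raw value bytes with their surrounding quotes intact, which is why the method strips b[1 : len(b)-1] before parsing:

package main

import (
	"fmt"

	"github.com/influxdb/telegraf/duration"
)

func main() {
	var d duration.Duration
	// Simulate the bytes the TOML decoder passes for `timeout = "5s"`:
	// the value still carries its quotes.
	if err := d.UnmarshalTOML([]byte(`"5s"`)); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // 5s
}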
@@ -27,17 +27,20 @@
 [agent]
 # Default data collection interval for all plugins
 interval = "10s"
+# Rounds collection interval to 'interval'
+# ie, if interval="10s" then always collect on :00, :10, :20, etc.
+round_interval = true

-# If utc = false, uses local time (utc is highly recommended)
-utc = true
+# Default data flushing interval for all outputs
+flush_interval = "10s"
+# Jitter the flush interval by a random range
+# ie, a jitter of 5s and interval 10s means flush will happen every 10-15s
+flush_jitter = "5s"
+# Number of times to retry each data flush
+flush_retries = 2

-# Precision of writes, valid values are n, u, ms, s, m, and h
-# note: using second precision greatly helps InfluxDB compression
-precision = "s"
-
-# run telegraf in debug mode
+# Run telegraf in debug mode
 debug = false

 # Override default hostname, if empty use os.Hostname()
 hostname = ""
@@ -54,15 +57,16 @@
 # Multiple urls can be specified for InfluxDB cluster support. Server to
 # write to will be randomly chosen each interval.
 urls = ["http://localhost:8086"] # required.

 # The target database for metrics. This database must already exist
 database = "telegraf" # required.

+# Precision of writes, valid values are n, u, ms, s, m, and h
+# note: using second precision greatly helps InfluxDB compression
+precision = "s"
+
 # Connection timeout (for the connection with InfluxDB), formatted as a string.
 # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
 # If not provided, will default to 0 (no timeout)
 # timeout = "5s"
 # username = "telegraf"
 # password = "metricsmetricsmetricsmetrics"
@@ -5,6 +5,7 @@ import (
 	_ "github.com/influxdb/telegraf/outputs/datadog"
 	_ "github.com/influxdb/telegraf/outputs/influxdb"
 	_ "github.com/influxdb/telegraf/outputs/kafka"
+	_ "github.com/influxdb/telegraf/outputs/librato"
 	_ "github.com/influxdb/telegraf/outputs/mqtt"
 	_ "github.com/influxdb/telegraf/outputs/opentsdb"
 )
@@ -4,5 +4,6 @@ This plugin writes to an AMQP exchange using the tag defined in the configuration file
 as RoutingTag, as a routing key.
 If RoutingTag is empty, then empty routing key will be used.
+Metrics are grouped in batches by RoutingTag.

 This plugin doesn't bind exchange to a queue, so it should be done by consumer.
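For orientation, a configuration stanza for this output could look roughly like the following. This is a sketch: the key names are assumed from the plugin's Exchange and RoutingTag fields used in the code below, and the broker URL is a placeholder:

[outputs.amqp]
# AMQP broker to publish to (placeholder address)
url = "amqp://localhost:5672/influxdb"
# Exchange to publish to
exchange = "telegraf"
# Point tag whose value is used as the routing key
routing_tag = "host"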
@@ -1,12 +1,13 @@
 package amqp

 import (
+	"bytes"
 	"fmt"
 	"log"
 	"sync"
 	"time"

-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 	"github.com/influxdb/telegraf/outputs"
 	"github.com/streadway/amqp"
 )
@@ -82,43 +83,29 @@ func (q *AMQP) Description() string {
 	return "Configuration for the AMQP server to send metrics to"
 }

-func (q *AMQP) Write(bp client.BatchPoints) error {
+func (q *AMQP) Write(points []*client.Point) error {
 	q.Lock()
 	defer q.Unlock()
-	if len(bp.Points) == 0 {
+	if len(points) == 0 {
 		return nil
 	}
+	var outbuf = make(map[string][][]byte)

-	var zero_time time.Time
-	for _, p := range bp.Points {
+	for _, p := range points {
 		// Combine tags from Point and BatchPoints and grab the resulting
 		// line-protocol output string to write to AMQP
 		var value, key string
-		if p.Raw != "" {
-			value = p.Raw
-		} else {
-			for k, v := range bp.Tags {
-				if p.Tags == nil {
-					p.Tags = make(map[string]string, len(bp.Tags))
-				}
-				p.Tags[k] = v
-			}
-
-			if p.Time == zero_time {
-				if bp.Time == zero_time {
-					p.Time = time.Now()
-				} else {
-					p.Time = bp.Time
-				}
-			}
-
-			value = p.MarshalString()
-		}
+		value = p.String()

 		if q.RoutingTag != "" {
-			if h, ok := p.Tags[q.RoutingTag]; ok {
+			if h, ok := p.Tags()[q.RoutingTag]; ok {
 				key = h
 			}
 		}
+		outbuf[key] = append(outbuf[key], []byte(value))
+	}

+	for key, buf := range outbuf {
 		err := q.channel.Publish(
 			q.Exchange, // exchange
 			key,        // routing key

@@ -126,7 +113,7 @@ func (q *AMQP) Write(bp client.BatchPoints) error {
 			false, // immediate
 			amqp.Publishing{
 				ContentType: "text/plain",
-				Body:        []byte(value),
+				Body:        bytes.Join(buf, []byte("\n")),
 			})
 		if err != nil {
 			return fmt.Errorf("FAILED to send amqp message: %s", err)
@@ -23,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
 	require.NoError(t, err)

 	// Verify that we can successfully write data to the amqp broker
-	err = q.Write(testutil.MockBatchPoints())
+	err = q.Write(testutil.MockBatchPoints().Points())
 	require.NoError(t, err)
 }
@@ -0,0 +1,9 @@
# Datadog Output Plugin
This plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics)
and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api)
for the account.
If the point value being sent cannot be converted to a float64, the metric is skipped.
Metrics are grouped by converting any `_` characters to `.` in the Point Name.
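For example, a point named cpu_usage_idle is submitted as the Datadog metric cpu.usage.idle; in the plugin code below this is simply:

	strings.Replace(pt.Name(), "_", ".", -1) // "cpu_usage_idle" -> "cpu.usage.idle"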
@@ -4,18 +4,20 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"log"
 	"net/http"
 	"net/url"
 	"sort"
+	"strings"

-	"github.com/influxdb/influxdb/client"
-	t "github.com/influxdb/telegraf"
+	"github.com/influxdb/influxdb/client/v2"
+	"github.com/influxdb/telegraf/duration"
 	"github.com/influxdb/telegraf/outputs"
 )

 type Datadog struct {
 	Apikey  string
-	Timeout t.Duration
+	Timeout duration.Duration

 	apiUrl string
 	client *http.Client

@@ -36,6 +38,7 @@ type TimeSeries struct {
 type Metric struct {
 	Metric string   `json:"metric"`
 	Points [1]Point `json:"points"`
+	Host   string   `json:"host"`
 	Tags   []string `json:"tags,omitempty"`
 }
@@ -59,23 +62,29 @@ func (d *Datadog) Connect() error {
 	return nil
 }

-func (d *Datadog) Write(bp client.BatchPoints) error {
-	if len(bp.Points) == 0 {
+func (d *Datadog) Write(points []*client.Point) error {
+	if len(points) == 0 {
 		return nil
 	}
-	ts := TimeSeries{
-		Series: make([]*Metric, len(bp.Points)),
-	}
-	for index, pt := range bp.Points {
+	ts := TimeSeries{}
+	var tempSeries = make([]*Metric, len(points))
+	var acceptablePoints = 0
+	for _, pt := range points {
 		metric := &Metric{
-			Metric: pt.Measurement,
-			Tags:   buildTags(bp.Tags, pt.Tags),
+			Metric: strings.Replace(pt.Name(), "_", ".", -1),
+			Tags:   buildTags(pt.Tags()),
+			Host:   pt.Tags()["host"],
 		}
-		if p, err := buildPoint(bp, pt); err == nil {
+		if p, err := buildPoint(pt); err == nil {
 			metric.Points[0] = p
+			tempSeries[acceptablePoints] = metric
+			acceptablePoints += 1
+		} else {
+			log.Printf("unable to build Metric for %s, skipping\n", pt.Name())
 		}
-		ts.Series[index] = metric
 	}
+	ts.Series = make([]*Metric, acceptablePoints)
+	copy(ts.Series, tempSeries[0:])
 	tsBytes, err := json.Marshal(ts)
 	if err != nil {
 		return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error())

@@ -87,10 +96,10 @@ func (d *Datadog) Write(bp client.BatchPoints) error {
 	req.Header.Add("Content-Type", "application/json")

 	resp, err := d.client.Do(req)
-	defer resp.Body.Close()
 	if err != nil {
 		return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
 	}
+	defer resp.Body.Close()

 	if resp.StatusCode < 200 || resp.StatusCode > 209 {
 		return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
@@ -114,13 +123,18 @@ func (d *Datadog) authenticatedUrl() string {
 	return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
 }

-func buildTags(bpTags map[string]string, ptTags map[string]string) []string {
-	tags := make([]string, (len(bpTags) + len(ptTags)))
-	index := 0
-	for k, v := range bpTags {
-		tags[index] = fmt.Sprintf("%s:%s", k, v)
-		index += 1
+func buildPoint(pt *client.Point) (Point, error) {
+	var p Point
+	if err := p.setValue(pt.Fields()["value"]); err != nil {
+		return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
 	}
+	p[0] = float64(pt.Time().Unix())
+	return p, nil
+}
+
+func buildTags(ptTags map[string]string) []string {
+	tags := make([]string, len(ptTags))
+	index := 0
 	for k, v := range ptTags {
 		tags[index] = fmt.Sprintf("%s:%s", k, v)
 		index += 1

@@ -129,19 +143,6 @@ func buildTags(bpTags map[string]string, ptTags map[string]string) []string {
 	return tags
 }

-func buildPoint(bp client.BatchPoints, pt client.Point) (Point, error) {
-	var p Point
-	if err := p.setValue(pt.Fields["value"]); err != nil {
-		return p, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
-	}
-	if pt.Time.IsZero() {
-		p[0] = float64(bp.Time.Unix())
-	} else {
-		p[0] = float64(pt.Time.Unix())
-	}
-	return p, nil
-}
-
 func (p *Point) setValue(v interface{}) error {
 	switch d := v.(type) {
 	case int:
@@ -11,7 +11,7 @@ import (
 	"github.com/influxdb/telegraf/testutil"

-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -38,7 +38,7 @@ func TestUriOverride(t *testing.T) {
 	d.Apikey = "123456"
 	err := d.Connect()
 	require.NoError(t, err)
-	err = d.Write(testutil.MockBatchPoints())
+	err = d.Write(testutil.MockBatchPoints().Points())
 	require.NoError(t, err)
 }

@@ -57,7 +57,7 @@ func TestBadStatusCode(t *testing.T) {
 	d.Apikey = "123456"
 	err := d.Connect()
 	require.NoError(t, err)
-	err = d.Write(testutil.MockBatchPoints())
+	err = d.Write(testutil.MockBatchPoints().Points())
 	if err == nil {
 		t.Errorf("error expected but none returned")
 	} else {

@@ -74,28 +74,24 @@ func TestAuthenticatedUrl(t *testing.T) {

 func TestBuildTags(t *testing.T) {
 	var tagtests = []struct {
-		bpIn    map[string]string
 		ptIn    map[string]string
 		outTags []string
 	}{
 		{
-			map[string]string{"one": "two"},
-			map[string]string{"three": "four"},
+			map[string]string{"one": "two", "three": "four"},
 			[]string{"one:two", "three:four"},
 		},
 		{
 			map[string]string{"aaa": "bbb"},
-			map[string]string{},
 			[]string{"aaa:bbb"},
 		},
 		{
 			map[string]string{},
-			map[string]string{},
 			[]string{},
 		},
 	}
 	for _, tt := range tagtests {
-		tags := buildTags(tt.bpIn, tt.ptIn)
+		tags := buildTags(tt.ptIn)
 		if !reflect.DeepEqual(tags, tt.outTags) {
 			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
 		}
@@ -103,92 +99,114 @@ func TestBuildTags(t *testing.T) {
 }

 func TestBuildPoint(t *testing.T) {
+	tags := make(map[string]string)
 	var tagtests = []struct {
-		bpIn  client.BatchPoints
-		ptIn  client.Point
+		ptIn  *client.Point
 		outPt Point
 		err   error
 	}{
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": 0.0},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 0.0},
+			client.NewPoint(
+				"test1",
+				tags,
+				map[string]interface{}{"value": 0.0},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				0.0,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{},
-			client.Point{
-				Fields: map[string]interface{}{"value": 1.0},
-				Time:   time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
-			},
-			Point{float64(time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix()), 1.0},
+			client.NewPoint(
+				"test2",
+				tags,
+				map[string]interface{}{"value": 1.0},
+				time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				1.0,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": 10},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 10.0},
+			client.NewPoint(
+				"test3",
+				tags,
+				map[string]interface{}{"value": 10},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				10.0,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": int32(112345)},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0},
+			client.NewPoint(
+				"test4",
+				tags,
+				map[string]interface{}{"value": int32(112345)},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				112345.0,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": int64(112345)},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 112345.0},
+			client.NewPoint(
+				"test5",
+				tags,
+				map[string]interface{}{"value": int64(112345)},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				112345.0,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": float32(11234.5)},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5},
+			client.NewPoint(
+				"test6",
+				tags,
+				map[string]interface{}{"value": float32(11234.5)},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				11234.5,
+			},
 			nil,
 		},
 		{
-			client.BatchPoints{
-				Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
-			},
-			client.Point{
-				Fields: map[string]interface{}{"value": "11234.5"},
-			},
-			Point{float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 11234.5},
+			client.NewPoint(
+				"test7",
+				tags,
+				map[string]interface{}{"value": "11234.5"},
+				time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+			),
+			Point{
+				float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
+				11234.5,
+			},
 			fmt.Errorf("unable to extract value from Fields, undeterminable type"),
 		},
 	}
 	for _, tt := range tagtests {
-		pt, err := buildPoint(tt.bpIn, tt.ptIn)
+		pt, err := buildPoint(tt.ptIn)
 		if err != nil && tt.err == nil {
-			t.Errorf("unexpected error, %+v\n", err)
+			t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
 		}
 		if tt.err != nil && err == nil {
-			t.Errorf("expected an error (%s) but none returned", tt.err.Error())
+			t.Errorf("%s: expected an error (%s) but none returned", tt.ptIn.Name(), tt.err.Error())
 		}
 		if !reflect.DeepEqual(pt, tt.outPt) && tt.err == nil {
-			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outPt, pt)
+			t.Errorf("%s: \nexpected %+v\ngot %+v\n", tt.ptIn.Name(), tt.outPt, pt)
 		}
 	}
 }
@@ -8,8 +8,8 @@ import (
 	"net/url"
 	"strings"

-	"github.com/influxdb/influxdb/client"
-	t "github.com/influxdb/telegraf"
+	"github.com/influxdb/influxdb/client/v2"
+	"github.com/influxdb/telegraf/duration"
 	"github.com/influxdb/telegraf/outputs"
 )

@@ -21,9 +21,10 @@ type InfluxDB struct {
 	Password  string
 	Database  string
 	UserAgent string
-	Timeout   t.Duration
+	Precision string
+	Timeout   duration.Duration

-	conns []*client.Client
+	conns []client.Client
 }

 var sampleConfig = `
@@ -32,9 +33,11 @@ var sampleConfig = `
 	urls = ["http://localhost:8086"] # required
 	# The target database for metrics (telegraf will create it if not exists)
 	database = "telegraf" # required
+	# Precision of writes, valid values are n, u, ms, s, m, and h
+	# note: using second precision greatly helps InfluxDB compression
+	precision = "s"

 	# Connection timeout (for the connection with InfluxDB), formatted as a string.
-	# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
 	# If not provided, will default to 0 (no timeout)
 	# timeout = "5s"
 	# username = "telegraf"
@@ -63,39 +66,32 @@ func (i *InfluxDB) Connect() error {
 		urls = append(urls, u)
 	}

-	var conns []*client.Client
+	var conns []client.Client
 	for _, parsed_url := range urls {
-		c, err := client.NewClient(client.Config{
-			URL:       *parsed_url,
+		c := client.NewClient(client.Config{
+			URL:       parsed_url,
 			Username:  i.Username,
 			Password:  i.Password,
 			UserAgent: i.UserAgent,
 			Timeout:   i.Timeout.Duration,
 		})
-		if err != nil {
-			return err
-		}
 		conns = append(conns, c)
 	}

-	// This will get set to nil if a successful connection is made
-	err := errors.New("Could not create database on any server")
-
 	for _, conn := range conns {
 		_, e := conn.Query(client.Query{
 			Command: fmt.Sprintf("CREATE DATABASE %s", i.Database),
 		})

 		if e != nil && !strings.Contains(e.Error(), "database already exists") {
-			log.Println("ERROR: " + e.Error())
+			log.Println("Database creation failed: " + e.Error())
 		} else {
-			err = nil
 			break
 		}
 	}

 	i.conns = conns
-	return err
+	return nil
 }
 func (i *InfluxDB) Close() error {

@@ -113,15 +109,22 @@ func (i *InfluxDB) Description() string {

 // Choose a random server in the cluster to write to until a successful write
 // occurs, logging each unsuccessful. If all servers fail, return error.
-func (i *InfluxDB) Write(bp client.BatchPoints) error {
-	bp.Database = i.Database
+func (i *InfluxDB) Write(points []*client.Point) error {
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  i.Database,
+		Precision: i.Precision,
+	})
+	for _, point := range points {
+		bp.AddPoint(point)
+	}

 	// This will get set to nil if a successful write occurs
 	err := errors.New("Could not write to any InfluxDB server in cluster")

 	p := rand.Perm(len(i.conns))
 	for _, n := range p {
-		if _, e := i.conns[n].Write(bp); e != nil {
+		if e := i.conns[n].Write(bp); e != nil {
 			log.Println("ERROR: " + e.Error())
 		} else {
 			err = nil
@@ -3,10 +3,9 @@ package kafka

 import (
 	"errors"
 	"fmt"
-	"time"

 	"github.com/Shopify/sarama"
-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 	"github.com/influxdb/telegraf/outputs"
 )
@@ -52,40 +51,21 @@ func (k *Kafka) Description() string {
 	return "Configuration for the Kafka server to send metrics to"
 }

-func (k *Kafka) Write(bp client.BatchPoints) error {
-	if len(bp.Points) == 0 {
+func (k *Kafka) Write(points []*client.Point) error {
+	if len(points) == 0 {
 		return nil
 	}

-	var zero_time time.Time
-	for _, p := range bp.Points {
+	for _, p := range points {
 		// Combine tags from Point and BatchPoints and grab the resulting
 		// line-protocol output string to write to Kafka
-		var value string
-		if p.Raw != "" {
-			value = p.Raw
-		} else {
-			for k, v := range bp.Tags {
-				if p.Tags == nil {
-					p.Tags = make(map[string]string, len(bp.Tags))
-				}
-				p.Tags[k] = v
-			}
-
-			if p.Time == zero_time {
-				if bp.Time == zero_time {
-					p.Time = time.Now()
-				} else {
-					p.Time = bp.Time
-				}
-			}
-
-			value = p.MarshalString()
-		}
+		value := p.String()

 		m := &sarama.ProducerMessage{
 			Topic: k.Topic,
 			Value: sarama.StringEncoder(value),
 		}
-		if h, ok := p.Tags[k.RoutingTag]; ok {
+		if h, ok := p.Tags()[k.RoutingTag]; ok {
 			m.Key = sarama.StringEncoder(h)
 		}
@@ -23,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
 	require.NoError(t, err)

 	// Verify that we can successfully write data to the kafka broker
-	err = k.Write(testutil.MockBatchPoints())
+	err = k.Write(testutil.MockBatchPoints().Points())
 	require.NoError(t, err)
 }
outputs/librato/README.md (new file)

@@ -0,0 +1,12 @@
# Librato Output Plugin
This plugin writes to the [Librato Metrics API](http://dev.librato.com/v1/metrics#metrics)
and requires an `api_user` and `api_token` which can be obtained [here](https://metrics.librato.com/account/api_tokens)
for the account.
The `source_tag` option in the configuration file selects which point tag
populates the `source` attribute sent to the API.
If the point value being sent cannot be converted to a float64, the metric is skipped.
Currently, the plugin does not send any associated Point Tags.
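A configuration stanza mirroring the sample config embedded in librato.go below; the `[outputs.librato]` section name is assumed from how the plugin registers itself with `outputs.Add("librato", ...)`:

[outputs.librato]
# Librato API user
api_user = "telegraf@influxdb.com" # required.
# Librato API token
api_token = "my-secret-token" # required.
# Tag field to populate the source attribute (optional)
source_tag = "hostname"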
outputs/librato/librato.go (new file)

@@ -0,0 +1,165 @@
package librato
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/influxdb/influxdb/client/v2"
"github.com/influxdb/telegraf/duration"
"github.com/influxdb/telegraf/outputs"
)
type Librato struct {
ApiUser string
ApiToken string
SourceTag string
Timeout duration.Duration
apiUrl string
client *http.Client
}
var sampleConfig = `
# Librato API Docs
# http://dev.librato.com/v1/metrics-authentication
# Librato API user
api_user = "telegraf@influxdb.com" # required.
# Librato API token
api_token = "my-secret-token" # required.
# Tag Field to populate source attribute (optional)
# This is typically the _hostname_ from which the metric was obtained.
source_tag = "hostname"
# Connection timeout.
# timeout = "5s"
`
type Metrics struct {
Gauges []*Gauge `json:"gauges"`
}
type Gauge struct {
Name string `json:"name"`
Value float64 `json:"value"`
Source string `json:"source"`
MeasureTime int64 `json:"measure_time"`
}
const librato_api = "https://metrics-api.librato.com/v1/metrics"
func NewLibrato(apiUrl string) *Librato {
return &Librato{
apiUrl: apiUrl,
}
}
func (l *Librato) Connect() error {
if l.ApiUser == "" || l.ApiToken == "" {
return fmt.Errorf("api_user and api_token are required fields for librato output")
}
l.client = &http.Client{
Timeout: l.Timeout.Duration,
}
return nil
}
func (l *Librato) Write(points []*client.Point) error {
if len(points) == 0 {
return nil
}
metrics := Metrics{}
var tempGauges = make([]*Gauge, len(points))
var acceptablePoints = 0
for _, pt := range points {
if gauge, err := l.buildGauge(pt); err == nil {
tempGauges[acceptablePoints] = gauge
acceptablePoints += 1
} else {
log.Printf("unable to build Gauge for %s, skipping\n", pt.Name())
}
}
metrics.Gauges = make([]*Gauge, acceptablePoints)
copy(metrics.Gauges, tempGauges[0:])
metricsBytes, err := json.Marshal(metrics)
if err != nil {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
}
req, err := http.NewRequest("POST", l.apiUrl, bytes.NewBuffer(metricsBytes))
if err != nil {
return fmt.Errorf("unable to create http.Request, %s\n", err.Error())
}
req.Header.Add("Content-Type", "application/json")
req.SetBasicAuth(l.ApiUser, l.ApiToken)
resp, err := l.client.Do(req)
if err != nil {
return fmt.Errorf("error POSTing metrics, %s\n", err.Error())
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("received bad status code, %d\n", resp.StatusCode)
}
return nil
}
func (l *Librato) SampleConfig() string {
return sampleConfig
}
func (l *Librato) Description() string {
return "Configuration for Librato API to send metrics to."
}
func (l *Librato) buildGauge(pt *client.Point) (*Gauge, error) {
gauge := &Gauge{
Name: pt.Name(),
MeasureTime: pt.Time().Unix(),
}
if err := gauge.setValue(pt.Fields()["value"]); err != nil {
return gauge, fmt.Errorf("unable to extract value from Fields, %s\n", err.Error())
}
if l.SourceTag != "" {
if source, ok := pt.Tags()[l.SourceTag]; ok {
gauge.Source = source
} else {
return gauge, fmt.Errorf("undeterminable Source type from Field, %s\n", l.SourceTag)
}
}
return gauge, nil
}
func (g *Gauge) setValue(v interface{}) error {
switch d := v.(type) {
case int:
g.Value = float64(int(d))
case int32:
g.Value = float64(int32(d))
case int64:
g.Value = float64(int64(d))
case float32:
g.Value = float64(d)
case float64:
g.Value = float64(d)
default:
return fmt.Errorf("undeterminable type %+v", d)
}
return nil
}
func (l *Librato) Close() error {
return nil
}
func init() {
outputs.Add("librato", func() outputs.Output {
return NewLibrato(librato_api)
})
}
@@ -0,0 +1,245 @@
package librato
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"github.com/influxdb/telegraf/testutil"
"github.com/influxdb/influxdb/client/v2"
"github.com/stretchr/testify/require"
)
var (
fakeUrl = "http://test.librato.com"
fakeUser = "telegraf@influxdb.com"
fakeToken = "123456"
)
func fakeLibrato() *Librato {
l := NewLibrato(fakeUrl)
l.ApiUser = fakeUser
l.ApiToken = fakeToken
return l
}
func TestUriOverride(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
defer ts.Close()
l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com"
l.ApiToken = "123456"
err := l.Connect()
require.NoError(t, err)
err = l.Write(testutil.MockBatchPoints().Points())
require.NoError(t, err)
}
func TestBadStatusCode(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusServiceUnavailable)
json.NewEncoder(w).Encode(`{
"errors": {
"system": [
"The API is currently down for maintenance. It'll be back shortly."
]
}
}`)
}))
defer ts.Close()
l := NewLibrato(ts.URL)
l.ApiUser = "telegraf@influxdb.com"
l.ApiToken = "123456"
err := l.Connect()
require.NoError(t, err)
err = l.Write(testutil.MockBatchPoints().Points())
if err == nil {
t.Errorf("error expected but none returned")
} else {
require.EqualError(t, fmt.Errorf("received bad status code, 503\n"), err.Error())
}
}
func TestBuildGauge(t *testing.T) {
tags := make(map[string]string)
var gaugeTests = []struct {
ptIn *client.Point
outGauge *Gauge
err error
}{
{
client.NewPoint(
"test1",
tags,
map[string]interface{}{"value": 0.0},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test1",
MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 0.0,
},
nil,
},
{
client.NewPoint(
"test2",
tags,
map[string]interface{}{"value": 1.0},
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test2",
MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 1.0,
},
nil,
},
{
client.NewPoint(
"test3",
tags,
map[string]interface{}{"value": 10},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test3",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 10.0,
},
nil,
},
{
client.NewPoint(
"test4",
tags,
map[string]interface{}{"value": int32(112345)},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test4",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 112345.0,
},
nil,
},
{
client.NewPoint(
"test5",
tags,
map[string]interface{}{"value": int64(112345)},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test5",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 112345.0,
},
nil,
},
{
client.NewPoint(
"test6",
tags,
map[string]interface{}{"value": float32(11234.5)},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test6",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 11234.5,
},
nil,
},
{
client.NewPoint(
"test7",
tags,
map[string]interface{}{"value": "11234.5"},
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test7",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 11234.5,
},
fmt.Errorf("unable to extract value from Fields, undeterminable type"),
},
}
l := NewLibrato(fakeUrl)
for _, gt := range gaugeTests {
gauge, err := l.buildGauge(gt.ptIn)
if err != nil && gt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
}
if gt.err != nil && err == nil {
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
}
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
}
}
}
func TestBuildGaugeWithSource(t *testing.T) {
var gaugeTests = []struct {
ptIn *client.Point
outGauge *Gauge
err error
}{
{
client.NewPoint(
"test1",
map[string]string{"hostname": "192.168.0.1"},
map[string]interface{}{"value": 0.0},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test1",
MeasureTime: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 0.0,
Source: "192.168.0.1",
},
nil,
},
{
client.NewPoint(
"test2",
map[string]string{"hostnam": "192.168.0.1"},
map[string]interface{}{"value": 1.0},
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
),
&Gauge{
Name: "test2",
MeasureTime: time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC).Unix(),
Value: 1.0,
},
fmt.Errorf("undeterminable Source type from Field, hostname"),
},
}
l := NewLibrato(fakeUrl)
l.SourceTag = "hostname"
for _, gt := range gaugeTests {
gauge, err := l.buildGauge(gt.ptIn)
if err != nil && gt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", gt.ptIn.Name(), err)
}
if gt.err != nil && err == nil {
t.Errorf("%s: expected an error (%s) but none returned", gt.ptIn.Name(), gt.err.Error())
}
if !reflect.DeepEqual(gauge, gt.outGauge) && gt.err == nil {
t.Errorf("%s: \nexpected %+v\ngot %+v\n", gt.ptIn.Name(), gt.outGauge, gauge)
}
}
}
@@ -10,8 +10,8 @@ import (
 	"sync"

 	paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
-	"github.com/influxdb/influxdb/client"
-	t "github.com/influxdb/telegraf"
+	"github.com/influxdb/influxdb/client/v2"
+	"github.com/influxdb/telegraf/duration"
 	"github.com/influxdb/telegraf/outputs"
 )

@@ -24,7 +24,7 @@ type MQTT struct {
 	Username    string
 	Password    string
 	Database    string
-	Timeout     t.Duration
+	Timeout     duration.Duration
 	TopicPrefix string

 	Client *paho.Client
@ -78,35 +78,31 @@ func (m *MQTT) Description() string {
return "Configuration for MQTT server to send metrics to" return "Configuration for MQTT server to send metrics to"
} }
func (m *MQTT) Write(bp client.BatchPoints) error { func (m *MQTT) Write(points []*client.Point) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
if len(bp.Points) == 0 { if len(points) == 0 {
return nil return nil
} }
hostname, ok := bp.Tags["host"] hostname, ok := points[0].Tags()["host"]
if !ok { if !ok {
hostname = "" hostname = ""
} }
for _, p := range bp.Points { for _, p := range points {
var t []string var t []string
if m.TopicPrefix != "" { if m.TopicPrefix != "" {
t = append(t, m.TopicPrefix) t = append(t, m.TopicPrefix)
} }
tm := strings.Split(p.Measurement, "_") tm := strings.Split(p.Name(), "_")
if len(tm) < 2 { if len(tm) < 2 {
tm = []string{p.Measurement, "stat"} tm = []string{p.Name(), "stat"}
} }
t = append(t, "host", hostname, tm[0], tm[1]) t = append(t, "host", hostname, tm[0], tm[1])
topic := strings.Join(t, "/") topic := strings.Join(t, "/")
var value string value := p.String()
if p.Raw != "" {
value = p.Raw
} else {
value = getValue(p.Fields["value"])
}
err := m.publish(topic, value) err := m.publish(topic, value)
if err != nil { if err != nil {
return fmt.Errorf("Could not write to MQTT server, %s", err) return fmt.Errorf("Could not write to MQTT server, %s", err)
@ -116,23 +112,6 @@ func (m *MQTT) Write(bp client.BatchPoints) error {
return nil return nil
} }
func getValue(v interface{}) string {
var ret string
switch v.(type) {
default:
ret = fmt.Sprintf("%v", v)
case bool:
ret = fmt.Sprintf("%t", v)
case float32, float64:
ret = fmt.Sprintf("%f", v)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
ret = fmt.Sprintf("%d", v)
case string, []byte:
ret = fmt.Sprintf("%s", v)
}
return ret
}
func (m *MQTT) publish(topic, body string) error { func (m *MQTT) publish(topic, body string) error {
token := m.Client.Publish(topic, 0, false, body) token := m.Client.Publish(topic, 0, false, body)
token.Wait() token.Wait()
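(For reference, the topic built above: with `TopicPrefix = "telegraf"`, a `host` tag of `server01`, and a point named `cpu_load_short`, the measurement name splits on `_` and only the first two segments are used, giving `telegraf/host/server01/cpu/load`.)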


@@ -8,7 +8,7 @@ import (
 	"strings"
 	"time"

-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 	"github.com/influxdb/telegraf/outputs"
 )
@@ -51,15 +51,15 @@ func (o *OpenTSDB) Connect() error {
 		return fmt.Errorf("OpenTSDB: TCP address cannot be resolved")
 	}
 	connection, err := net.DialTCP("tcp", nil, tcpAddr)
-	defer connection.Close()
 	if err != nil {
 		return fmt.Errorf("OpenTSDB: Telnet connect fail")
 	}
+	defer connection.Close()
 	return nil
 }

-func (o *OpenTSDB) Write(bp client.BatchPoints) error {
-	if len(bp.Points) == 0 {
+func (o *OpenTSDB) Write(points []*client.Point) error {
+	if len(points) == 0 {
 		return nil
 	}
 	var timeNow = time.Now()
@@ -70,19 +70,20 @@ func (o *OpenTSDB) Write(bp client.BatchPoints) error {
 	if err != nil {
 		return fmt.Errorf("OpenTSDB: Telnet connect fail")
 	}
-	for _, pt := range bp.Points {
+	for _, pt := range points {
 		metric := &MetricLine{
-			Metric:    fmt.Sprintf("%s%s", o.Prefix, pt.Measurement),
+			Metric:    fmt.Sprintf("%s%s", o.Prefix, pt.Name()),
 			Timestamp: timeNow.Unix(),
 		}
-		metricValue, buildError := buildValue(bp, pt)
+
+		metricValue, buildError := buildValue(pt)
 		if buildError != nil {
 			fmt.Printf("OpenTSDB: %s\n", buildError.Error())
 			continue
 		}
 		metric.Value = metricValue
-		tagsSlice := buildTags(bp.Tags, pt.Tags)
+		tagsSlice := buildTags(pt.Tags())
 		metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
 		messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
@@ -99,13 +100,9 @@ func (o *OpenTSDB) Write(bp client.BatchPoints) error {
 	return nil
 }

-func buildTags(bpTags map[string]string, ptTags map[string]string) []string {
-	tags := make([]string, (len(bpTags) + len(ptTags)))
+func buildTags(ptTags map[string]string) []string {
+	tags := make([]string, len(ptTags))
 	index := 0
-	for k, v := range bpTags {
-		tags[index] = fmt.Sprintf("%s=%s", k, v)
-		index += 1
-	}
 	for k, v := range ptTags {
 		tags[index] = fmt.Sprintf("%s=%s", k, v)
 		index += 1
@@ -114,9 +111,9 @@ func buildTags(bpTags map[string]string, ptTags map[string]string) []string {
 	return tags
 }

-func buildValue(bp client.BatchPoints, pt client.Point) (string, error) {
+func buildValue(pt *client.Point) (string, error) {
 	var retv string
-	var v = pt.Fields["value"]
+	var v = pt.Fields()["value"]
 	switch p := v.(type) {
 	case int64:
 		retv = IntToString(int64(p))


@@ -3,47 +3,42 @@ package opentsdb

 import (
 	"reflect"
 	"testing"
-	"time"

-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 	"github.com/influxdb/telegraf/testutil"
 	"github.com/stretchr/testify/require"
 )

 func TestBuildTagsTelnet(t *testing.T) {
 	var tagtests = []struct {
-		bpIn    map[string]string
 		ptIn    map[string]string
 		outTags []string
 	}{
 		{
-			map[string]string{"one": "two"},
-			map[string]string{"three": "four"},
+			map[string]string{"one": "two", "three": "four"},
 			[]string{"one=two", "three=four"},
 		},
 		{
 			map[string]string{"aaa": "bbb"},
-			map[string]string{},
 			[]string{"aaa=bbb"},
 		},
 		{
-			map[string]string{"one": "two"},
-			map[string]string{"aaa": "bbb"},
+			map[string]string{"one": "two", "aaa": "bbb"},
 			[]string{"aaa=bbb", "one=two"},
 		},
 		{
-			map[string]string{},
 			map[string]string{},
 			[]string{},
 		},
 	}

 	for _, tt := range tagtests {
-		tags := buildTags(tt.bpIn, tt.ptIn)
+		tags := buildTags(tt.ptIn)
 		if !reflect.DeepEqual(tags, tt.outTags) {
 			t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags)
 		}
 	}
 }

 func TestWrite(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping integration test in short mode")
@@ -51,7 +46,7 @@ func TestWrite(t *testing.T) {
 	o := &OpenTSDB{
 		Host:   testutil.GetLocalHost(),
-		Port:   24242,
+		Port:   4242,
 		Prefix: "prefix.test.",
 	}
@@ -60,36 +55,24 @@ func TestWrite(t *testing.T) {
 	require.NoError(t, err)

 	// Verify that we can successfully write data to OpenTSDB
-	err = o.Write(testutil.MockBatchPoints())
+	err = o.Write(testutil.MockBatchPoints().Points())
 	require.NoError(t, err)

 	// Verify postive and negative test cases of writing data
-	var bp client.BatchPoints
-	bp.Time = time.Now()
-	bp.Tags = map[string]string{"testkey": "testvalue"}
-	bp.Points = []client.Point{
-		{
-			Measurement: "justametric.float",
-			Fields:      map[string]interface{}{"value": float64(1.0)},
-		},
-		{
-			Measurement: "justametric.int",
-			Fields:      map[string]interface{}{"value": int64(123456789)},
-		},
-		{
-			Measurement: "justametric.uint",
-			Fields:      map[string]interface{}{"value": uint64(123456789012345)},
-		},
-		{
-			Measurement: "justametric.string",
-			Fields:      map[string]interface{}{"value": "Lorem Ipsum"},
-		},
-		{
-			Measurement: "justametric.anotherfloat",
-			Fields:      map[string]interface{}{"value": float64(42.0)},
-		},
-	}
-	err = o.Write(bp)
+	bp := testutil.MockBatchPoints()
+	tags := make(map[string]string)
+	bp.AddPoint(client.NewPoint("justametric.float", tags,
+		map[string]interface{}{"value": float64(1.0)}))
+	bp.AddPoint(client.NewPoint("justametric.int", tags,
+		map[string]interface{}{"value": int64(123456789)}))
+	bp.AddPoint(client.NewPoint("justametric.uint", tags,
+		map[string]interface{}{"value": uint64(123456789012345)}))
+	bp.AddPoint(client.NewPoint("justametric.string", tags,
+		map[string]interface{}{"value": "Lorem Ipsum"}))
+	bp.AddPoint(client.NewPoint("justametric.anotherfloat", tags,
+		map[string]interface{}{"value": float64(42.0)}))
+
+	err = o.Write(bp.Points())
 	require.NoError(t, err)
 }


@@ -1,7 +1,7 @@
 package outputs

 import (
-	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/client/v2"
 )

 type Output interface {
@@ -9,7 +9,7 @@ type Output interface {
 	Close() error
 	Description() string
 	SampleConfig() string
-	Write(client.BatchPoints) error
+	Write(points []*client.Point) error
 }

 type Creator func() Output

plugins/aerospike/README.md Normal file

@@ -0,0 +1,265 @@
## Telegraf Plugin: Aerospike
#### Plugin arguments:
- **servers** string array: List of aerospike servers to query (def: 127.0.0.1:3000)
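A minimal configuration sketch, mirroring the plugin's sample config shown later in this commit (the `[aerospike]` section name is assumed from Telegraf's usual plugin-name convention):

```
[aerospike]
  # Aerospike servers to connect to (with port)
  servers = ["localhost:3000"]
```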
#### Description
The aerospike plugin queries aerospike server(s) and gets node statistics. It also collects stats for
all the configured namespaces.

For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).

To make querying less complicated, all `-` characters in metric names are replaced with `_`, since Aerospike reports metric names in both forms.
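For example, `stat-write-errs` is collected as `stat_write_errs`.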
# Measurements:
#### Aerospike Statistics [values]:
Meta:
- units: Integer
Measurement names:
- batch_index_queue
- batch_index_unused_buffers
- batch_queue
- batch_tree_count
- client_connections
- data_used_bytes_memory
- index_used_bytes_memory
- info_queue
- migrate_progress_recv
- migrate_progress_send
- migrate_rx_objs
- migrate_tx_objs
- objects
- ongoing_write_reqs
- partition_absent
- partition_actual
- partition_desync
- partition_object_count
- partition_ref_count
- partition_replica
- proxy_in_progress
- query_agg_avg_rec_count
- query_avg_rec_count
- query_lookup_avg_rec_count
- queue
- record_locks
- record_refs
- sindex_used_bytes_memory
- sindex_gc_garbage_cleaned
- system_free_mem_pct
- total_bytes_disk
- total_bytes_memory
- tree_count
- scans_active
- uptime
- used_bytes_disk
- used_bytes_memory
- cluster_size
- waiting_transactions
#### Aerospike Statistics [cumulative]:
Meta:
- units: Integer
Measurement names:
- batch_errors
- batch_index_complete
- batch_index_errors
- batch_index_initiate
- batch_index_timeout
- batch_initiate
- batch_timeout
- err_duplicate_proxy_request
- err_out_of_space
- err_replica_non_null_node
- err_replica_null_node
- err_rw_cant_put_unique
- err_rw_pending_limit
- err_rw_request_not_found
- err_storage_queue_full
- err_sync_copy_null_master
- err_sync_copy_null_node
- err_tsvc_requests
- err_write_fail_bin_exists
- err_write_fail_generation
- err_write_fail_generation_xdr
- err_write_fail_incompatible_type
- err_write_fail_key_exists
- err_write_fail_key_mismatch
- err_write_fail_not_found
- err_write_fail_noxdr
- err_write_fail_parameter
- err_write_fail_prole_delete
- err_write_fail_prole_generation
- err_write_fail_prole_unknown
- err_write_fail_unknown
- fabric_msgs_rcvd
- fabric_msgs_sent
- heartbeat_received_foreign
- heartbeat_received_self
- migrate_msgs_recv
- migrate_msgs_sent
- migrate_num_incoming_accepted
- migrate_num_incoming_refused
- proxy_action
- proxy_initiate
- proxy_retry
- proxy_retry_new_dest
- proxy_retry_q_full
- proxy_retry_same_dest
- proxy_unproxy
- query_abort
- query_agg
- query_agg_abort
- query_agg_err
- query_agg_success
- query_bad_records
- query_fail
- query_long_queue_full
- query_long_running
- query_lookup_abort
- query_lookup_err
- query_lookups
- query_lookup_success
- query_reqs
- query_short_queue_full
- query_short_running
- query_success
- query_tracked
- read_dup_prole
- reaped_fds
- rw_err_ack_badnode
- rw_err_ack_internal
- rw_err_ack_nomatch
- rw_err_dup_cluster_key
- rw_err_dup_internal
- rw_err_dup_send
- rw_err_write_cluster_key
- rw_err_write_internal
- rw_err_write_send
- sindex_ucgarbage_found
- sindex_gc_locktimedout
- sindex_gc_inactivity_dur
- sindex_gc_activity_dur
- sindex_gc_list_creation_time
- sindex_gc_list_deletion_time
- sindex_gc_objects_validated
- sindex_gc_garbage_found
- stat_cluster_key_err_ack_dup_trans_reenqueue
- stat_cluster_key_err_ack_rw_trans_reenqueue
- stat_cluster_key_prole_retry
- stat_cluster_key_regular_processed
- stat_cluster_key_trans_to_proxy_retry
- stat_deleted_set_object
- stat_delete_success
- stat_duplicate_operation
- stat_evicted_objects
- stat_evicted_objects_time
- stat_evicted_set_objects
- stat_expired_objects
- stat_nsup_deletes_not_shipped
- stat_proxy_errs
- stat_proxy_reqs
- stat_proxy_reqs_xdr
- stat_proxy_success
- stat_read_errs_notfound
- stat_read_errs_other
- stat_read_reqs
- stat_read_reqs_xdr
- stat_read_success
- stat_rw_timeout
- stat_slow_trans_queue_batch_pop
- stat_slow_trans_queue_pop
- stat_slow_trans_queue_push
- stat_write_errs
- stat_write_errs_notfound
- stat_write_errs_other
- stat_write_reqs
- stat_write_reqs_xdr
- stat_write_success
- stat_xdr_pipe_miss
- stat_xdr_pipe_writes
- stat_zero_bin_records
- storage_defrag_corrupt_record
- storage_defrag_wait
- transactions
- basic_scans_succeeded
- basic_scans_failed
- aggr_scans_succeeded
- aggr_scans_failed
- udf_bg_scans_succeeded
- udf_bg_scans_failed
- udf_delete_err_others
- udf_delete_reqs
- udf_delete_success
- udf_lua_errs
- udf_query_rec_reqs
- udf_read_errs_other
- udf_read_reqs
- udf_read_success
- udf_replica_writes
- udf_scan_rec_reqs
- udf_write_err_others
- udf_write_reqs
- udf_write_success
- write_master
- write_prole
#### Aerospike Statistics [percentage]:
Meta:
- units: percent (out of 100)
Measurement names:
- free_pct_disk
- free_pct_memory
# Measurements:
#### Aerospike Namespace Statistics [values]:
Meta:
- units: Integer
- tags: `namespace=<namespace>`
Measurement names:
- available_bin_names
- available_pct
- current_time
- data_used_bytes_memory
- index_used_bytes_memory
- master_objects
- max_evicted_ttl
- max_void_time
- non_expirable_objects
- objects
- prole_objects
- sindex_used_bytes_memory
- total_bytes_disk
- total_bytes_memory
- used_bytes_disk
- used_bytes_memory
#### Aerospike Namespace Statistics [cumulative]:
Meta:
- units: Integer
- tags: `namespace=<namespace>`
Measurement names:
- evicted_objects
- expired_objects
- set_deleted_objects
- set_evicted_objects
#### Aerospike Namespace Statistics [percentage]:
Meta:
- units: percent (out of 100)
- tags: `namespace=<namespace>`
Measurement names:
- free_pct_disk
- free_pct_memory


@@ -0,0 +1,335 @@
package aerospike
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/influxdb/telegraf/plugins"
"net"
"strconv"
"strings"
"sync"
)
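// The Aerospike info protocol frames every request and response with an
// 8-byte header: 1 byte version, 1 byte message type, and a 6-byte
// big-endian payload length (the fields of aerospikeMessageHeader below).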
const (
MSG_HEADER_SIZE = 8
MSG_TYPE = 1 // Info is 1
MSG_VERSION = 2
)
var (
STATISTICS_COMMAND = []byte("statistics\n")
NAMESPACES_COMMAND = []byte("namespaces\n")
)
type aerospikeMessageHeader struct {
Version uint8
Type uint8
DataLen [6]byte
}
type aerospikeMessage struct {
aerospikeMessageHeader
Data []byte
}
// Taken from aerospike-client-go/types/message.go
func (msg *aerospikeMessage) Serialize() []byte {
msg.DataLen = msgLenToBytes(int64(len(msg.Data)))
buf := bytes.NewBuffer([]byte{})
binary.Write(buf, binary.BigEndian, msg.aerospikeMessageHeader)
binary.Write(buf, binary.BigEndian, msg.Data[:])
return buf.Bytes()
}
type aerospikeInfoCommand struct {
msg *aerospikeMessage
}
// Taken from aerospike-client-go/info.go
func (nfo *aerospikeInfoCommand) parseMultiResponse() (map[string]string, error) {
responses := make(map[string]string)
offset := int64(0)
begin := int64(0)
dataLen := int64(len(nfo.msg.Data))
// Create reusable StringBuilder for performance.
for offset < dataLen {
b := nfo.msg.Data[offset]
if b == '\t' {
name := nfo.msg.Data[begin:offset]
offset++
begin = offset
// Parse field value.
for offset < dataLen {
if nfo.msg.Data[offset] == '\n' {
break
}
offset++
}
if offset > begin {
value := nfo.msg.Data[begin:offset]
responses[string(name)] = string(value)
} else {
responses[string(name)] = ""
}
offset++
begin = offset
} else if b == '\n' {
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
offset++
begin = offset
} else {
offset++
}
}
if offset > begin {
name := nfo.msg.Data[begin:offset]
responses[string(name)] = ""
}
return responses, nil
}
type Aerospike struct {
Servers []string
}
var sampleConfig = `
# Aerospike servers to connect to (with port)
# Default: servers = ["localhost:3000"]
#
# This plugin will query all namespaces the aerospike
# server has configured and get stats for them.
servers = ["localhost:3000"]
`
func (a *Aerospike) SampleConfig() string {
return sampleConfig
}
func (a *Aerospike) Description() string {
return "Read stats from an aerospike server"
}
func (a *Aerospike) Gather(acc plugins.Accumulator) error {
if len(a.Servers) == 0 {
return a.gatherServer("127.0.0.1:3000", acc)
}
var wg sync.WaitGroup
var outerr error
for _, server := range a.Servers {
wg.Add(1)
go func(server string) {
defer wg.Done()
outerr = a.gatherServer(server, acc)
}(server)
}
wg.Wait()
return outerr
}
func (a *Aerospike) gatherServer(host string, acc plugins.Accumulator) error {
aerospikeInfo, err := getMap(STATISTICS_COMMAND, host)
if err != nil {
return fmt.Errorf("Aerospike info failed: %s", err)
}
readAerospikeStats(aerospikeInfo, acc, host, "")
namespaces, err := getList(NAMESPACES_COMMAND, host)
if err != nil {
return fmt.Errorf("Aerospike namespace list failed: %s", err)
}
for ix := range namespaces {
nsInfo, err := getMap([]byte("namespace/"+namespaces[ix]+"\n"), host)
if err != nil {
return fmt.Errorf("Aerospike namespace '%s' query failed: %s", namespaces[ix], err)
}
readAerospikeStats(nsInfo, acc, host, namespaces[ix])
}
return nil
}
func getMap(key []byte, host string) (map[string]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
}
parsed, err := unmarshalMapInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func getList(key []byte, host string) ([]string, error) {
data, err := get(key, host)
if err != nil {
return nil, fmt.Errorf("Failed to get data: %s", err)
}
parsed, err := unmarshalListInfo(data, string(key))
if err != nil {
return nil, fmt.Errorf("Failed to unmarshal data: %s", err)
}
return parsed, nil
}
func get(key []byte, host string) (map[string]string, error) {
var err error
var data map[string]string
asInfo := &aerospikeInfoCommand{
msg: &aerospikeMessage{
aerospikeMessageHeader: aerospikeMessageHeader{
Version: uint8(MSG_VERSION),
Type: uint8(MSG_TYPE),
DataLen: msgLenToBytes(int64(len(key))),
},
Data: key,
},
}
cmd := asInfo.msg.Serialize()
addr, err := net.ResolveTCPAddr("tcp", host)
if err != nil {
return data, fmt.Errorf("Lookup failed for '%s': %s", host, err)
}
conn, err := net.DialTCP("tcp", nil, addr)
if err != nil {
return data, fmt.Errorf("Connection failed for '%s': %s", host, err)
}
defer conn.Close()
_, err = conn.Write(cmd)
if err != nil {
return data, fmt.Errorf("Failed to send to '%s': %s", host, err)
}
msgHeader := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE))
_, err = readLenFromConn(conn, msgHeader.Bytes(), MSG_HEADER_SIZE)
if err != nil {
return data, fmt.Errorf("Failed to read header: %s", err)
}
err = binary.Read(msgHeader, binary.BigEndian, &asInfo.msg.aerospikeMessageHeader)
if err != nil {
return data, fmt.Errorf("Failed to unmarshal header: %s", err)
}
msgLen := msgLenFromBytes(asInfo.msg.aerospikeMessageHeader.DataLen)
if int64(len(asInfo.msg.Data)) != msgLen {
asInfo.msg.Data = make([]byte, msgLen)
}
_, err = readLenFromConn(conn, asInfo.msg.Data, len(asInfo.msg.Data))
if err != nil {
return data, fmt.Errorf("Failed to read from connection to '%s': %s", host, err)
}
data, err = asInfo.parseMultiResponse()
if err != nil {
return data, fmt.Errorf("Failed to parse response from '%s': %s", host, err)
}
return data, err
}
func readAerospikeStats(stats map[string]string, acc plugins.Accumulator, host, namespace string) {
for key, value := range stats {
tags := map[string]string{
"host": host,
}
if namespace != "" {
tags["namespace"] = namespace
}
// We are going to ignore all string based keys
val, err := strconv.ParseInt(value, 10, 64)
if err == nil {
if strings.Contains(key, "-") {
key = strings.Replace(key, "-", "_", -1)
}
acc.Add(key, val, tags)
}
}
}
func unmarshalMapInfo(infoMap map[string]string, key string) (map[string]string, error) {
key = strings.TrimSuffix(key, "\n")
res := map[string]string{}
v, exists := infoMap[key]
if !exists {
return res, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
for i := range values {
kv := strings.Split(values[i], "=")
if len(kv) > 1 {
res[kv[0]] = kv[1]
}
}
return res, nil
}
func unmarshalListInfo(infoMap map[string]string, key string) ([]string, error) {
key = strings.TrimSuffix(key, "\n")
v, exists := infoMap[key]
if !exists {
return []string{}, fmt.Errorf("Key '%s' missing from info", key)
}
values := strings.Split(v, ";")
return values, nil
}
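// readLenFromConn reads from c until exactly length bytes have been filled
// into buffer, stopping early only if a read returns an error.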
func readLenFromConn(c net.Conn, buffer []byte, length int) (total int, err error) {
var r int
for total < length {
r, err = c.Read(buffer[total:length])
total += r
if err != nil {
break
}
}
return
}
// Taken from aerospike-client-go/types/message.go
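// Encodes the length as the low 6 bytes of a big-endian uint64;
// e.g. a payload of 11 bytes becomes [0x00 0x00 0x00 0x00 0x00 0x0b].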
func msgLenToBytes(DataLen int64) [6]byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(DataLen))
res := [6]byte{}
copy(res[:], b[2:])
return res
}
// Taken from aerospike-client-go/types/message.go
func msgLenFromBytes(buf [6]byte) int64 {
nbytes := append([]byte{0, 0}, buf[:]...)
DataLen := binary.BigEndian.Uint64(nbytes)
return int64(DataLen)
}
func init() {
plugins.Add("aerospike", func() plugins.Plugin {
return &Aerospike{}
})
}


@@ -0,0 +1,112 @@
package aerospike
import (
"github.com/influxdb/telegraf/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"reflect"
"testing"
)
func TestAerospikeStatistics(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
}
a := &Aerospike{
Servers: []string{testutil.GetLocalHost() + ":3000"},
}
var acc testutil.Accumulator
err := a.Gather(&acc)
require.NoError(t, err)
// Only use a few of the metrics
asMetrics := []string{
"transactions",
"stat_write_errs",
"stat_read_reqs",
"stat_write_reqs",
}
for _, metric := range asMetrics {
assert.True(t, acc.HasIntValue(metric), metric)
}
}
func TestAerospikeMsgLenFromToBytes(t *testing.T) {
var i int64 = 8
assert.True(t, i == msgLenFromBytes(msgLenToBytes(i)))
}
func TestReadAerospikeStatsNoNamespace(t *testing.T) {
// Also test for re-writing
var acc testutil.Accumulator
stats := map[string]string{
"stat-write-errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "")
for k := range stats {
if k == "stat-write-errs" {
k = "stat_write_errs"
}
assert.True(t, acc.HasMeasurement(k))
assert.True(t, acc.CheckValue(k, int64(12345)))
}
}
func TestReadAerospikeStatsNamespace(t *testing.T) {
var acc testutil.Accumulator
stats := map[string]string{
"stat_write_errs": "12345",
"stat_read_reqs": "12345",
}
readAerospikeStats(stats, &acc, "host1", "test")
tags := map[string]string{
"host": "host1",
"namespace": "test",
}
for k := range stats {
assert.True(t, acc.ValidateTaggedValue(k, int64(12345), tags) == nil)
}
}
func TestAerospikeUnmarshalList(t *testing.T) {
i := map[string]string{
"test": "one;two;three",
}
expected := []string{"one", "two", "three"}
list, err := unmarshalListInfo(i, "test2")
assert.True(t, err != nil)
list, err = unmarshalListInfo(i, "test")
assert.True(t, err == nil)
equal := true
for ix := range expected {
if list[ix] != expected[ix] {
equal = false
break
}
}
assert.True(t, equal)
}
func TestAerospikeUnmarshalMap(t *testing.T) {
i := map[string]string{
"test": "key1=value1;key2=value2",
}
expected := map[string]string{
"key1": "value1",
"key2": "value2",
}
m, err := unmarshalMapInfo(i, "test")
assert.True(t, err == nil)
assert.True(t, reflect.DeepEqual(m, expected))
}


@@ -1,6 +1,7 @@
 package all

 import (
+	_ "github.com/influxdb/telegraf/plugins/aerospike"
 	_ "github.com/influxdb/telegraf/plugins/apache"
 	_ "github.com/influxdb/telegraf/plugins/bcache"
 	_ "github.com/influxdb/telegraf/plugins/disque"


@@ -3,11 +3,13 @@ package exec
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"github.com/gonuts/go-shellquote"
 	"github.com/influxdb/telegraf/plugins"
 	"math"
 	"os/exec"
+	"strings"
 	"sync"
 	"time"
 )
@@ -88,19 +90,32 @@ func (e *Exec) Description() string {
 func (e *Exec) Gather(acc plugins.Accumulator) error {
 	var wg sync.WaitGroup

-	var outerr error
+	errorChannel := make(chan error, len(e.Commands))

 	for _, c := range e.Commands {
 		wg.Add(1)
 		go func(c *Command, acc plugins.Accumulator) {
 			defer wg.Done()
-			outerr = e.gatherCommand(c, acc)
+			err := e.gatherCommand(c, acc)
+			if err != nil {
+				errorChannel <- err
+			}
 		}(c, acc)
 	}

 	wg.Wait()
+	close(errorChannel)

-	return outerr
+	// Get all errors and return them as one giant error
+	errorStrings := []string{}
+	for err := range errorChannel {
+		errorStrings = append(errorStrings, err.Error())
+	}
+
+	if len(errorStrings) == 0 {
+		return nil
+	}
+	return errors.New(strings.Join(errorStrings, "\n"))
 }

 func (e *Exec) gatherCommand(c *Command, acc plugins.Accumulator) error {
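(Funneling each command's error through a buffered channel, instead of having every goroutine overwrite a single shared `outerr`, removes a data race and preserves all failure messages rather than just the last one.)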


@@ -0,0 +1,69 @@
# HTTP JSON Plugin
The httpjson plugin collects data from remote URLs that respond with JSON. It flattens the JSON structure and collects every numeric value, treating each as a float.

For example, if you have a service called _mycollector_ with an HTTP endpoint for gathering stats at http://my.service.com/_stats:
```
[[httpjson.services]]
name = "mycollector"
servers = [
"http://my.service.com/_stats"
]
# HTTP method to use (case-sensitive)
method = "GET"
```
The `name` is used as a prefix for the measurements.

The `method` setting specifies the HTTP method to use for requests.

You can specify which keys from the server's response should be treated as tags:
```
[[httpjson.services]]
...
tag_keys = [
"role",
"version"
]
```
**NOTE**: tag values should be strings.
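For example, with `tag_keys = ["role"]` and a response containing `"role": "master"`, each collected metric carries a `role=master` tag, and `role` is excluded from the numeric fields.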
You can also specify additional request parameters for the service:
```
[[httpjson.services]]
...
[httpjson.services.parameters]
event_type = "cpu_spike"
threshold = "0.75"
```
# Sample
Let's say that we have a service named "mycollector", which responds with:
```json
{
"a": 0.5,
"b": {
"c": "some text",
"d": 0.1,
"e": 5
}
}
```
The collected metrics will be:
```
httpjson_mycollector_a value=0.5
httpjson_mycollector_b_d value=0.1
httpjson_mycollector_b_e value=5
```
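Note that `b_c` is not collected: its value is a string, and only numeric values are gathered. A minimal sketch of this kind of flattening (illustrative only, not the plugin's actual code; the `flatten` name is made up here):

```go
package main

import "fmt"

// flatten walks a decoded JSON document and records "parent_child" keys for
// every numeric leaf. encoding/json decodes all JSON numbers as float64, so
// matching on float64 catches both 0.1 and 5.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	case float64:
		out[prefix] = t
	}
}

func main() {
	doc := map[string]interface{}{
		"a": 0.5,
		"b": map[string]interface{}{"c": "some text", "d": 0.1, "e": 5.0},
	}
	out := map[string]float64{}
	flatten("", doc, out)
	fmt.Println(out) // map[a:0.5 b_d:0.1 b_e:5]
}
```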


@@ -22,6 +22,7 @@ type Service struct {
 	Name       string
 	Servers    []string
 	Method     string
+	TagKeys    []string
 	Parameters map[string]string
 }
@@ -61,6 +62,12 @@ var sampleConfig = `
 		# HTTP method to use (case-sensitive)
 		method = "GET"

+		# List of tag names to extract from top-level of JSON server response
+		# tag_keys = [
+		#	"my_tag_1",
+		#	"my_tag_2"
+		# ]
+
 		# HTTP parameters (all values must be strings)
 		[httpjson.services.parameters]
 			event_type = "cpu_spike"
@@ -126,7 +133,7 @@ func (h *HttpJson) gatherServer(acc plugins.Accumulator, service Service, server
 		return err
 	}

-	var jsonOut interface{}
+	var jsonOut map[string]interface{}
 	if err = json.Unmarshal([]byte(resp), &jsonOut); err != nil {
 		return errors.New("Error decoding JSON response")
 	}
@@ -135,6 +142,14 @@ func (h *HttpJson) gatherServer(acc plugins.Accumulator, service Service, server
 		"server": serverURL,
 	}

+	for _, tag := range service.TagKeys {
+		switch v := jsonOut[tag].(type) {
+		case string:
+			tags[tag] = v
+		}
+		delete(jsonOut, tag)
+	}
+
 	processResponse(acc, service.Name, tags, jsonOut)
 	return nil
 }


@@ -28,6 +28,13 @@ const validJSON = `
 	}
 }`

+const validJSONTags = `
+{
+	"value": 15,
+	"role": "master",
+	"build": "123"
+}`
+
 const invalidJSON = "I don't think this is JSON"

 const empty = ""
@@ -87,8 +94,8 @@ func genMockHttpJson(response string, statusCode int) *HttpJson {
 			},
 			Service{
 				Servers: []string{
-					"http://server1.example.com/metrics/",
-					"http://server2.example.com/metrics/",
+					"http://server3.example.com/metrics/",
+					"http://server4.example.com/metrics/",
 				},
 				Name:   "other_webapp",
 				Method: "POST",
@@ -96,6 +103,10 @@ func genMockHttpJson(response string, statusCode int) *HttpJson {
 					"httpParam1": "12",
 					"httpParam2": "the second parameter",
 				},
+				TagKeys: []string{
+					"role",
+					"build",
+				},
 			},
 		},
 	}
@@ -185,3 +196,28 @@ func TestHttpJsonEmptyResponse(t *testing.T) {
 	assert.Equal(t, len(strings.Split(err.Error(), "\n")), 4)
 	assert.Equal(t, 0, len(acc.Points))
 }
+
+// Test that the proper values are ignored or collected
+func TestHttpJson200Tags(t *testing.T) {
+	httpjson := genMockHttpJson(validJSONTags, 200)
+
+	var acc testutil.Accumulator
+	err := httpjson.Gather(&acc)
+	require.NoError(t, err)
+
+	assert.Equal(t, 4, len(acc.Points))
+
+	for _, service := range httpjson.Services {
+		if service.Name == "other_webapp" {
+			for _, srv := range service.Servers {
+				require.NoError(t,
+					acc.ValidateTaggedValue(
+						fmt.Sprintf("%s_value", service.Name),
+						15.0,
+						map[string]string{"server": srv, "role": "master", "build": "123"},
+					),
+				)
+			}
+		}
+	}
+}


@@ -9,16 +9,16 @@ from the same topic in parallel.
 ## Testing

 Running integration tests requires running Zookeeper & Kafka. The following
-commands assume you're on OS X & using [boot2docker](http://boot2docker.io/).
+commands assume you're on OS X & using [boot2docker](http://boot2docker.io/) or docker-machine through [Docker Toolbox](https://www.docker.com/docker-toolbox).

 To start Kafka & Zookeeper:

 ```
-docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip` --env ADVERTISED_PORT=9092 spotify/kafka
+docker run -d -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=`boot2docker ip || docker-machine ip <your_machine_name>` --env ADVERTISED_PORT=9092 spotify/kafka
 ```

 To run tests:

 ```
-ZOOKEEPER_PEERS=$(boot2docker ip):2181 KAFKA_PEERS=$(boot2docker ip):9092 go test
+go test
 ```


@@ -93,7 +93,7 @@ func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte
 			}

 			for _, point := range points {
-				acc.AddFieldsWithTime(point.Name(), point.Fields(), point.Tags(), point.Time())
+				acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
 			}
 		case <-timeout:
 			return nil


@@ -29,7 +29,10 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
 	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
 	require.NoError(t, err)
+
 	_, _, err = producer.SendMessage(&sarama.ProducerMessage{Topic: k.Topic, Value: sarama.StringEncoder(msg)})
+	require.NoError(t, err)
+
 	producer.Close()

 	var acc testutil.Accumulator
@@ -50,5 +53,5 @@ func TestReadsMetricsFromKafka(t *testing.T) {
 			"direction": "in",
 			"region":    "us-west",
 		}, point.Tags)
-		assert.Equal(t, time.Unix(0, 1422568543702900257), point.Time)
+		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
 	}


@@ -89,7 +89,7 @@ func (d *MongodbData) addStat(acc plugins.Accumulator, statLine reflect.Value, s
 }

 func (d *MongodbData) add(acc plugins.Accumulator, key string, val interface{}) {
-	acc.AddFieldsWithTime(
+	acc.AddFields(
 		key,
 		map[string]interface{}{
 			"value": val,


@@ -367,7 +367,11 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage {
 }

 func diff(newVal, oldVal, sampleTime int64) int64 {
-	return (newVal - oldVal) / sampleTime
+	d := newVal - oldVal
+	if d <= 0 {
+		d = newVal
+	}
+	return d / sampleTime
 }

 // NewStatLine constructs a StatLine object from two ServerStatus objects.
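(When a counter resets or wraps, `newVal - oldVal` is non-positive; falling back to `newVal` avoids reporting a negative rate.)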


@@ -16,12 +16,13 @@ type Mysql struct {
 var sampleConfig = `
 	# specify servers via a url matching:
 	#  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
+	#  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
 	#  e.g.
-	#    root:root@http://10.0.0.18/?tls=false
-	#    root:passwd@tcp(127.0.0.1:3036)/
+	#    root:passwd@tcp(127.0.0.1:3306)/?tls=false
+	#    root@tcp(127.0.0.1:3306)/?tls=false
 	#
 	# If no servers are specified, then localhost is used as the host.
-	servers = ["localhost"]
+	servers = ["tcp(127.0.0.1:3306)/"]
`

 func (m *Mysql) SampleConfig() string {
@@ -113,7 +114,10 @@ var mappings = []*mapping{
 }

 func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
-	if serv == "localhost" {
+	// If user forgot the '/', add it
+	if strings.HasSuffix(serv, ")") {
+		serv = serv + "/"
+	} else if serv == "localhost" {
 		serv = ""
 	}
@@ -129,14 +133,10 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error {
 		return err
 	}

-	// Parse out user/password from server address tag if given
 	var servtag string
-	if strings.Contains(serv, "@") {
-		servtag = strings.Split(serv, "@")[1]
-	} else if serv == "" {
+	servtag, err = parseDSN(serv)
+	if err != nil {
 		servtag = "localhost"
-	} else {
-		servtag = serv
 	}

 	for rows.Next() {
 		var name string


@@ -28,7 +28,7 @@ func TestMysqlGeneratesMetrics(t *testing.T) {
 		prefix string
 		count  int
 	}{
-		{"commands", 141},
+		{"commands", 147},
 		{"handler", 18},
 		{"bytes", 2},
 		{"innodb", 51},
@@ -81,3 +81,62 @@ func TestMysqlDefaultsToLocal(t *testing.T) {

 	assert.True(t, len(acc.Points) > 0)
 }
+
+func TestMysqlParseDSN(t *testing.T) {
+	tests := []struct {
+		input  string
+		output string
+	}{
+		{
+			"",
+			"127.0.0.1:3306",
+		},
+		{
+			"localhost",
+			"127.0.0.1:3306",
+		},
+		{
+			"127.0.0.1",
+			"127.0.0.1:3306",
+		},
+		{
+			"tcp(192.168.1.1:3306)/",
+			"192.168.1.1:3306",
+		},
+		{
+			"tcp(localhost)/",
+			"localhost",
+		},
+		{
+			"root:passwd@tcp(192.168.1.1:3306)/?tls=false",
+			"192.168.1.1:3306",
+		},
+		{
+			"root@tcp(127.0.0.1:3306)/?tls=false",
+			"127.0.0.1:3306",
+		},
+		{
+			"root:passwd@tcp(localhost:3036)/dbname?allowOldPasswords=1",
+			"localhost:3036",
+		},
+		{
+			"root:foo@bar@tcp(192.1.1.1:3306)/?tls=false",
+			"192.1.1.1:3306",
+		},
+		{
+			"root:f00@b4r@tcp(192.1.1.1:3306)/?tls=false",
+			"192.1.1.1:3306",
+		},
+		{
+			"root:fl!p11@tcp(192.1.1.1:3306)/?tls=false",
+			"192.1.1.1:3306",
+		},
+	}
+
+	for _, test := range tests {
+		output, _ := parseDSN(test.input)
+		if output != test.output {
+			t.Errorf("Expected %s, got %s\n", test.output, output)
+		}
+	}
+}


@@ -0,0 +1,85 @@
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"errors"
"strings"
)
// parseDSN parses a DSN string and returns its address portion
// ("host:port" for tcp, or a socket path for unix).
func parseDSN(dsn string) (string, error) {
//var user, passwd string
var addr, net string
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
for i := len(dsn) - 1; i >= 0; i-- {
if dsn[i] == '/' {
var j, k int
// left part is empty if i <= 0
if i > 0 {
// [username[:password]@][protocol[(address)]]
// Find the last '@' in dsn[:i]
for j = i; j >= 0; j-- {
if dsn[j] == '@' {
// username[:password]
// Find the first ':' in dsn[:j]
for k = 0; k < j; k++ {
if dsn[k] == ':' {
//passwd = dsn[k+1 : j]
break
}
}
//user = dsn[:k]
break
}
}
// [protocol[(address)]]
// Find the first '(' in dsn[j+1:i]
for k = j + 1; k < i; k++ {
if dsn[k] == '(' {
// dsn[i-1] must be == ')' if an address is specified
if dsn[i-1] != ')' {
if strings.ContainsRune(dsn[k+1:i], ')') {
return "", errors.New("Invalid DSN unescaped")
}
return "", errors.New("Invalid DSN Addr")
}
addr = dsn[k+1 : i-1]
break
}
}
net = dsn[j+1 : k]
}
break
}
}
// Set default network if empty
if net == "" {
net = "tcp"
}
// Set default address if empty
if addr == "" {
switch net {
case "tcp":
addr = "127.0.0.1:3306"
case "unix":
addr = "/tmp/mysql.sock"
default:
return "", errors.New("Default addr for network '" + net + "' unknown")
}
}
return addr, nil
}


@@ -6,17 +6,15 @@ type Accumulator interface {
 	// Create a point with a value, decorating it with tags
 	// NOTE: tags is expected to be owned by the caller, don't mutate
 	// it after passing to Add.
-	Add(measurement string, value interface{}, tags map[string]string)
-
-	// Create a point with a set of values, decorating it with tags
-	// NOTE: tags and values are expected to be owned by the caller, don't mutate
-	// them after passing to AddFieldsWithTime.
-	AddFieldsWithTime(
-		measurement string,
-		values map[string]interface{},
-		tags map[string]string,
-		timestamp time.Time,
-	)
+	Add(measurement string,
+		value interface{},
+		tags map[string]string,
+		t ...time.Time)
+
+	AddFields(measurement string,
+		fields map[string]interface{},
+		tags map[string]string,
+		t ...time.Time)
 }

 type Plugin interface {


@@ -22,19 +22,20 @@ func TestZookeeperGeneratesMetrics(t *testing.T) {
 	err := z.Gather(&acc)
 	require.NoError(t, err)

-	intMetrics := []string{"zookeeper_avg_latency",
-		"zookeeper_max_latency",
-		"zookeeper_min_latency",
-		"zookeeper_packets_received",
-		"zookeeper_packets_sent",
-		"zookeeper_outstanding_requests",
-		"zookeeper_znode_count",
-		"zookeeper_watch_count",
-		"zookeeper_ephemerals_count",
-		"zookeeper_approximate_data_size",
-		"zookeeper_pending_syncs",
-		"zookeeper_open_file_descriptor_count",
-		"zookeeper_max_file_descriptor_count"}
+	intMetrics := []string{
+		"avg_latency",
+		"max_latency",
+		"min_latency",
+		"packets_received",
+		"packets_sent",
+		"outstanding_requests",
+		"znode_count",
+		"watch_count",
+		"ephemerals_count",
+		"approximate_data_size",
+		"open_file_descriptor_count",
+		"max_file_descriptor_count",
+	}

 	for _, metric := range intMetrics {
 		assert.True(t, acc.HasIntValue(metric), metric)


@@ -1,5 +1,5 @@
 mysql:
-  image: mysql
+  image: mysql:5.7
   ports:
     - "3306:3306"
   environment:
@@ -34,11 +34,16 @@ rabbitmq:
     - "5672:5672"

 opentsdb:
-  image: lancope/opentsdb
+  image: petergrace/opentsdb-docker
   ports:
-    - "24242:4242"
+    - "4242:4242"

 redis:
   image: redis
   ports:
     - "6379:6379"
+
+aerospike:
+  image: aerospike/aerospike-server
+  ports:
+    - "3000:3000"


@@ -111,6 +111,7 @@ fi

 # Configuration file
 config=/etc/opt/telegraf/telegraf.conf
+confdir=/etc/opt/telegraf/telegraf.d

 # If the daemon is not there, then exit.
 [ -x $daemon ] || exit 5
@@ -136,9 +137,9 @@ case $1 in
             log_success_msg "Starting the process" "$name"
             if which start-stop-daemon > /dev/null 2>&1; then
-                start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
+                start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
             else
-                nohup $daemon -pidfile $pidfile -config $config $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
+                nohup $daemon -pidfile $pidfile -config $config -configdirectory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
             fi
             log_success_msg "$name process was started"
             ;;

Some files were not shown because too many files have changed in this diff.