From 143ec1a01915a7b2c46ff07d3e326c0a92792ca8 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 10:39:15 -0600 Subject: [PATCH 001/125] Filter out the 'v' from the version tag, issue #134 --- package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.sh b/package.sh index 57a96d251..b36e482e2 100755 --- a/package.sh +++ b/package.sh @@ -188,7 +188,7 @@ if [ "$1" == "-h" ]; then usage 0 fi -VERSION=`git describe --always --tags` +VERSION=`git describe --always --tags | tr -d v` echo -e "\nStarting package process, version: $VERSION\n" From 9777aa6165af9f3ab4c3af78f6840597523e22f0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 10:48:21 -0600 Subject: [PATCH 002/125] Update README to point to url without 'v' prepended to version --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 796aecb20..9fa599e14 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ are some InfluxDB compatibility requirements: * InfluxDB 0.9.2 and prior requires Telegraf 0.1.4 Latest: -* http://get.influxdb.org/telegraf/telegraf_v0.1.6_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-v0.1.6-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.1.6_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.1.6-1.x86_64.rpm 0.1.4: * http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb From 1daa059ef9e8af912a71c525f2031115912ec287 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Sat, 22 Aug 2015 14:03:30 -0600 Subject: [PATCH 003/125] Log plugin errors in crankParallel and crankSeparate cases. Previously errors weren't logged in these cases. 
--- agent.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/agent.go b/agent.go index 1c90bf1a4..3f8132018 100644 --- a/agent.go +++ b/agent.go @@ -179,7 +179,11 @@ func (a *Agent) crankParallel() error { acc.Prefix = plugin.name + "_" acc.Config = plugin.config - plugin.plugin.Gather(&acc) + err := plugin.plugin.Gather(&acc) + if err != nil { + log.Printf("Error in plugins: %s", err) + } + points <- &acc }(plugin) @@ -333,7 +337,10 @@ func (a *Agent) Run(shutdown chan struct{}) error { wg.Add(1) go func(plugin *runningPlugin) { defer wg.Done() - a.crankSeparate(shutdown, plugin) + err := a.crankSeparate(shutdown, plugin) + if err != nil { + log.Printf("Error in plugins: %s", err) + } }(plugin) } } From afe366d6b73b18fb21c1df95cffaa1b53c3159e2 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Sat, 22 Aug 2015 15:04:34 -0600 Subject: [PATCH 004/125] go fmt remove whitespace --- agent.go | 1 - 1 file changed, 1 deletion(-) diff --git a/agent.go b/agent.go index 3f8132018..9337a3d48 100644 --- a/agent.go +++ b/agent.go @@ -184,7 +184,6 @@ func (a *Agent) crankParallel() error { log.Printf("Error in plugins: %s", err) } - points <- &acc }(plugin) } From f45f7e56fd9b736224d5eeb306eb0b44325fcfe0 Mon Sep 17 00:00:00 2001 From: nsvarich Date: Mon, 24 Aug 2015 11:25:15 -0600 Subject: [PATCH 005/125] add plugin.name to error message --- agent.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agent.go b/agent.go index 9337a3d48..b35fae390 100644 --- a/agent.go +++ b/agent.go @@ -181,7 +181,7 @@ func (a *Agent) crankParallel() error { err := plugin.plugin.Gather(&acc) if err != nil { - log.Printf("Error in plugins: %s", err) + log.Printf("Error in plugin [%s]: %s", plugin.name, err) } points <- &acc @@ -338,7 +338,7 @@ func (a *Agent) Run(shutdown chan struct{}) error { defer wg.Done() err := a.crankSeparate(shutdown, plugin) if err != nil { - log.Printf("Error in plugins: %s", err) + log.Printf("Error in plugin [%s]: %s", 
plugin.name, err) } }(plugin) } From 610f24e0cd1b43f0a2fcfa68a9e6ab874aa933a6 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Mon, 24 Aug 2015 11:33:37 -0600 Subject: [PATCH 006/125] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3dc99275..63a8a2f99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.1.7 [unreleased] ### Features + - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! ### Bigfixes - [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. From b014ac12ee510e79987cb0388014f6e95a0dab03 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Mon, 24 Aug 2015 11:36:27 -0600 Subject: [PATCH 007/125] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63a8a2f99..db446fb8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ ## v0.1.7 [unreleased] ### Features - - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! +- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! ### Bigfixes - [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. From 50f902cb02a08b16c49e7fa8b2933f9276492946 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 13:25:20 -0600 Subject: [PATCH 008/125] Fixes #128, add system load and swap back to default Telegraf config --- CHANGELOG.md | 3 ++- etc/config.sample.toml | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index db446fb8a..8371811fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,8 @@ ### Features - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! 
-### Bigfixes +### Bugfixes +- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. - [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. ## v0.1.6 [2015-08-20] diff --git a/etc/config.sample.toml b/etc/config.sample.toml index ac208fb1a..1b7263e4c 100644 --- a/etc/config.sample.toml +++ b/etc/config.sample.toml @@ -68,3 +68,9 @@ totalcpu = true # Read metrics about memory usage [mem] # no configuration + +[system] + # no configuration + +[swap] + # no configuration From 42602a3f356e54457ef3e411db18e8272076fd79 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 14:52:46 -0600 Subject: [PATCH 009/125] Provide a -usage flag for printing the usage of a single plugin Closes #136 --- cmd/telegraf/telegraf.go | 18 +++++++++++++++--- config.go | 12 ++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 07a61146d..38f323215 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -13,13 +13,18 @@ import ( _ "github.com/influxdb/telegraf/plugins/all" ) -var fDebug = flag.Bool("debug", false, "show metrics as they're generated to stdout") +var fDebug = flag.Bool("debug", false, + "show metrics as they're generated to stdout") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") var fConfig = flag.String("config", "", "configuration file to load") var fVersion = flag.Bool("version", false, "display the version") -var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") +var fSampleConfig = flag.Bool("sample-config", false, + "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") -var fPLuginsFilter = flag.String("filter", "", "filter the plugins to enable, separator is :") +var fPLuginsFilter = flag.String("filter", "", + "filter the plugins to enable, separator is :") +var fUsage = 
flag.String("usage", "", + "print usage for a plugin, ie, 'telegraf -usage mysql'") // Telegraf version // -ldflags "-X main.Version=`git describe --always --tags`" @@ -39,6 +44,13 @@ func main() { return } + if *fUsage != "" { + if err := telegraf.PrintPluginConfig(*fUsage); err != nil { + log.Fatal(err) + } + return + } + var ( config *telegraf.Config err error diff --git a/config.go b/config.go index 3ba9d4a49..fe47c6d0d 100644 --- a/config.go +++ b/config.go @@ -421,3 +421,15 @@ func PrintSampleConfig() { } } } + +// PrintPluginConfig prints the config usage of a single plugin. +func PrintPluginConfig(name string) error { + if creator, ok := plugins.Plugins[name]; ok { + plugin := creator() + fmt.Printf("# %s\n[%s]\n", plugin.Description(), name) + fmt.Printf(strings.TrimSpace(plugin.SampleConfig())) + } else { + return errors.New(fmt.Sprintf("Plugin %s not found", name)) + } + return nil +} From a449e4b47cdba0ed17127e384cdf0f981b69125c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 14:56:50 -0600 Subject: [PATCH 010/125] Add #136 to CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8371811fc..931798b4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ### Features - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! +- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. 
From bd85a36cb1e88450bea2792e989fa8074b5798a4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 24 Aug 2015 14:08:47 -0600 Subject: [PATCH 011/125] Fixes #130, document mysql plugin better, README --- README.md | 7 +++++-- plugins/mysql/mysql.go | 4 +++- plugins/postgresql/postgresql.go | 4 ++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9fa599e14..7df4a26a9 100644 --- a/README.md +++ b/README.md @@ -133,11 +133,14 @@ path = [ "/opt", "/home" ] ## Supported Plugins -Telegraf currently has support for collecting metrics from: +**You can view usage instructions for each plugin by running** +`telegraf -usage ` + +Telegraf currently has support for collecting metrics from * disque * elasticsearch -* exec (generic executable JSON-gathering plugin) +* exec (generic JSON-emitting executable plugin) * haproxy * httpjson (generic JSON-emitting http service plugin) * kafka_consumer diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index a55006a4d..9714b25e9 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -16,7 +16,9 @@ type Mysql struct { var sampleConfig = ` # specify servers via a url matching: # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] -# e.g. root:root@http://10.0.0.18/?tls=false +# e.g. +# root:root@http://10.0.0.18/?tls=false +# root:passwd@tcp(127.0.0.1:3036)/ # # If no servers are specified, then localhost is used as the host. servers = ["localhost"]` diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 85479577d..9312c4aa4 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -25,11 +25,11 @@ var sampleConfig = ` # postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full] # or a simple string: # host=localhost user=pqotest password=... sslmode=... -# +# # All connection parameters are optional. 
By default, the host is localhost # and the user is the currently running user. For localhost, we default # to sslmode=disable as well. -# +# address = "sslmode=disable" From 85ae6fffbba798a6c0773f89bc9aa7cd1e47938b Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 11:23:40 -0600 Subject: [PATCH 012/125] Vagrantfile: do a one-way rsync so that binaries don't get shared between VMs and host --- Vagrantfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 79996489f..72124a8ac 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -7,7 +7,10 @@ VAGRANTFILE_API_VERSION = "2" Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "ubuntu/trusty64" - config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/influxdb/telegraf" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/influxdb/telegraf", + type: "rsync", + rsync__args: ["--verbose", "--archive", "--delete", "-z", "--safe-links"], + rsync__exclude: ["./telegraf", ".vagrant/"] config.vm.provision "shell", name: "sudo", inline: <<-SHELL chown -R vagrant:vagrant /home/vagrant/go From 8a6665c03f4a119a79fe11dade903b7bb5fc5642 Mon Sep 17 00:00:00 2001 From: Bruno Bigras Date: Tue, 25 Aug 2015 14:17:15 -0400 Subject: [PATCH 013/125] memcached: fix when a value contains a space Fixes #137 Closes #139 --- CHANGELOG.md | 1 + plugins/memcached/memcached.go | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 931798b4b..a26ed0ce0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. 
+- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. diff --git a/plugins/memcached/memcached.go b/plugins/memcached/memcached.go index 802fae35d..7035bde04 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/memcached/memcached.go @@ -100,14 +100,13 @@ func (m *Memcached) gatherServer(address string, acc plugins.Accumulator) error break } // Read values - var name, value string - n, errScan := fmt.Sscanf(string(line), "STAT %s %s\r\n", &name, &value) - if errScan != nil || n != 2 { + s := bytes.SplitN(line, []byte(" "), 3) + if len(s) != 3 || !bytes.Equal(s[0], []byte("STAT")) { return fmt.Errorf("unexpected line in stats response: %q", line) } // Save values - values[name] = value + values[string(s[1])] = string(s[2]) } // From 94eed9b43c48fb3c8e7a8396c170075ef15948ab Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 13:18:33 -0600 Subject: [PATCH 014/125] Add MySQL server address tag to all measurements Closes #138 --- CHANGELOG.md | 1 + plugins/memcached/memcached.go | 2 +- plugins/mysql/mysql.go | 15 ++++++++++++--- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a26ed0ce0..19ecc0aa3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. - [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space +- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag. ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. 
diff --git a/plugins/memcached/memcached.go b/plugins/memcached/memcached.go index 7035bde04..2cf555c87 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/memcached/memcached.go @@ -109,7 +109,7 @@ func (m *Memcached) gatherServer(address string, acc plugins.Accumulator) error values[string(s[1])] = string(s[2]) } - // + // Add server address as a tag tags := map[string]string{"server": address} // Process values diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index 9714b25e9..425f3ea98 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -111,10 +111,19 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { var found bool + // Parse out user/password from server address tag if given + var servtag string + if strings.Contains(serv, "@") { + servtag = strings.Split(serv, "@")[1] + } else { + servtag = serv + } + tags := map[string]string{"server": servtag} + for _, mapped := range mappings { if strings.HasPrefix(name, mapped.onServer) { i, _ := strconv.Atoi(string(val.([]byte))) - acc.Add(mapped.inExport+name[len(mapped.onServer):], i, nil) + acc.Add(mapped.inExport+name[len(mapped.onServer):], i, tags) found = true } } @@ -130,14 +139,14 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { return err } - acc.Add("queries", i, nil) + acc.Add("queries", i, tags) case "Slow_queries": i, err := strconv.ParseInt(string(val.([]byte)), 10, 64) if err != nil { return err } - acc.Add("slow_queries", i, nil) + acc.Add("slow_queries", i, tags) } } From 0acf15c025b36a843cb0aa6b2700a2c5140bedc8 Mon Sep 17 00:00:00 2001 From: Bruno Bigras Date: Tue, 25 Aug 2015 16:00:01 -0400 Subject: [PATCH 015/125] Typo: prec -> perc Closes #140 --- CHANGELOG.md | 1 + plugins/system/memory.go | 2 +- plugins/system/system_test.go | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19ecc0aa3..6ab35aedf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ 
### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. - [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. +- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! ## v0.1.6 [2015-08-20] diff --git a/plugins/system/memory.go b/plugins/system/memory.go index c835dc76d..048012e83 100644 --- a/plugins/system/memory.go +++ b/plugins/system/memory.go @@ -27,7 +27,7 @@ func (s *MemStats) Gather(acc plugins.Accumulator) error { acc.Add("total", vm.Total, vmtags) acc.Add("available", vm.Available, vmtags) acc.Add("used", vm.Used, vmtags) - acc.Add("used_prec", vm.UsedPercent, vmtags) + acc.Add("used_perc", vm.UsedPercent, vmtags) acc.Add("free", vm.Free, vmtags) acc.Add("active", vm.Active, vmtags) acc.Add("inactive", vm.Inactive, vmtags) diff --git a/plugins/system/system_test.go b/plugins/system/system_test.go index 19e05e64d..88a35071a 100644 --- a/plugins/system/system_test.go +++ b/plugins/system/system_test.go @@ -310,7 +310,7 @@ func TestSystemStats_GenerateStats(t *testing.T) { assert.True(t, acc.CheckTaggedValue("total", uint64(12400), vmtags)) assert.True(t, acc.CheckTaggedValue("available", uint64(7600), vmtags)) assert.True(t, acc.CheckTaggedValue("used", uint64(5000), vmtags)) - assert.True(t, acc.CheckTaggedValue("used_prec", float64(47.1), vmtags)) + assert.True(t, acc.CheckTaggedValue("used_perc", float64(47.1), vmtags)) assert.True(t, acc.CheckTaggedValue("free", uint64(1235), vmtags)) assert.True(t, acc.CheckTaggedValue("active", uint64(8134), vmtags)) assert.True(t, acc.CheckTaggedValue("inactive", uint64(1124), vmtags)) From ca1d2c7000b526a1ec4cd850e0c382cbf0cd1b0b Mon Sep 17 00:00:00 2001 From: subhachandrachandra Date: Fri, 21 Aug 2015 15:15:19 -0700 Subject: [PATCH 016/125] Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 
--- plugins/system/ps/mem/mem_darwin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/system/ps/mem/mem_darwin.go b/plugins/system/ps/mem/mem_darwin.go index 5d2ff7e3e..43da44d1d 100644 --- a/plugins/system/ps/mem/mem_darwin.go +++ b/plugins/system/ps/mem/mem_darwin.go @@ -53,7 +53,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { } ret := &VirtualMemoryStat{ - Total: parsed[0] * p, + Total: parsed[0], Free: parsed[1] * p, } From 8d034f544c14a974fb7cb57acfff163cf91ff664 Mon Sep 17 00:00:00 2001 From: subhachandrachandra Date: Fri, 21 Aug 2015 16:08:54 -0700 Subject: [PATCH 017/125] Fixed memory reporting for Linux systems /proc/meminfo reports memory in KiloBytes and so needs a multiplier of 1024 instead of 1000. The kernel reports in terms of pages and the proc filesystem is left shifting by 2 for 4KB pages to get KB. Since this is a binary shift, Bytes will need to shift by 10 and so get multiplied by 1024. From the kernel code. PAGE_SHIFT = 12 for 4KB pages "MemTotal: %8lu kB\n", K(i.totalram) Closes #131 --- CHANGELOG.md | 1 + plugins/system/ps/mem/mem_linux.go | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ab35aedf..068f65f44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. - [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix. +- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! - [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc! 
## v0.1.6 [2015-08-20] diff --git a/plugins/system/ps/mem/mem_linux.go b/plugins/system/ps/mem/mem_linux.go index a0505b5f5..42a49a2b6 100644 --- a/plugins/system/ps/mem/mem_linux.go +++ b/plugins/system/ps/mem/mem_linux.go @@ -30,17 +30,17 @@ func VirtualMemory() (*VirtualMemoryStat, error) { } switch key { case "MemTotal": - ret.Total = t * 1000 + ret.Total = t * 1024 case "MemFree": - ret.Free = t * 1000 + ret.Free = t * 1024 case "Buffers": - ret.Buffers = t * 1000 + ret.Buffers = t * 1024 case "Cached": - ret.Cached = t * 1000 + ret.Cached = t * 1024 case "Active": - ret.Active = t * 1000 + ret.Active = t * 1024 case "Inactive": - ret.Inactive = t * 1000 + ret.Inactive = t * 1024 } } ret.Available = ret.Free + ret.Buffers + ret.Cached From ac97fefb91f7fb443be188d9f23a303fc28d58e9 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 16:34:30 -0600 Subject: [PATCH 018/125] makefile: ADVERTISED_HOST needs only be set during docker-compose target --- Makefile | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 1e2f9c383..5c27c5d95 100644 --- a/Makefile +++ b/Makefile @@ -10,17 +10,14 @@ prepare: go get github.com/tools/godep docker-compose: - docker-compose up -d - -test: ifeq ($(UNAME), Darwin) - ADVERTISED_HOST=$(shell sh -c 'boot2docker ip') $(MAKE) test-full + ADVERTISED_HOST=$(shell sh -c 'boot2docker ip') docker-compose up -d endif ifeq ($(UNAME), Linux) - ADVERTISED_HOST=localhost $(MAKE) test-full + ADVERTISED_HOST=localhost docker-compose up -d endif -test-full: prepare docker-compose +test: prepare docker-compose $(GOPATH)/bin/godep go test -v ./... 
test-short: prepare From ab4344a781fe896fbfb7d19e3347aa142a454237 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 16:52:16 -0600 Subject: [PATCH 019/125] Merge problem, re-enable non-standard DB names --- outputs/influxdb/influxdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outputs/influxdb/influxdb.go b/outputs/influxdb/influxdb.go index 96505a4d7..dd6546928 100644 --- a/outputs/influxdb/influxdb.go +++ b/outputs/influxdb/influxdb.go @@ -41,7 +41,7 @@ func (i *InfluxDB) Connect() error { } _, err = c.Query(client.Query{ - Command: fmt.Sprintf("CREATE DATABASE telegraf"), + Command: fmt.Sprintf("CREATE DATABASE %s", i.Database), }) if err != nil && !strings.Contains(err.Error(), "database already exists") { From 846fd311210cdc944bca55394e0c4530412b2346 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 18:16:01 -0600 Subject: [PATCH 020/125] Improve build from source instructions Closes #141 --- README.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7df4a26a9..bcf06d319 100644 --- a/README.md +++ b/README.md @@ -46,9 +46,14 @@ brew install telegraf ### From Source: -Telegraf manages dependencies via `godep`, which gets installed via the Makefile. -Assuming you have your GOPATH setup, `make build` should be enough to gather dependencies -and build telegraf. +Telegraf manages dependencies via `godep`, which gets installed via the Makefile +if you don't have it already. You also must build with golang version 1.4+ + +1. [Install Go](https://golang.org/doc/install) +2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) +3. run `go get github.com/influxdb/telegraf` +4. `cd $GOPATH/src/github.com/influxdb/telegraf` +5. 
run `make` ### How to use it: From a00510a73c7081b15fd77a20f75723fff369eb6f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 25 Aug 2015 17:59:12 -0600 Subject: [PATCH 021/125] Outputs enhancement to require Description and SampleConfig functions Closes #142 --- CHANGELOG.md | 1 + config.go | 98 +++++++++++++++++++----------------- outputs/datadog/datadog.go | 16 ++++++ outputs/influxdb/influxdb.go | 27 ++++++++++ outputs/registry.go | 2 + 5 files changed, 97 insertions(+), 47 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 068f65f44..2f669de0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. - [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space - [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag. +- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. diff --git a/config.go b/config.go index fe47c6d0d..5eb22ffe6 100644 --- a/config.go +++ b/config.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/influxdb/telegraf/outputs" "github.com/influxdb/telegraf/plugins" "github.com/naoina/toml" "github.com/naoina/toml/ast" @@ -326,9 +327,6 @@ type hasDescr interface { var header = `# Telegraf configuration -# If this file is missing an [agent] section, you must first generate a -# valid config with 'telegraf -sample-config > telegraf.toml' - # Telegraf is entirely plugin driven. All metrics are gathered from the # declared plugins. @@ -348,40 +346,28 @@ var header = `# Telegraf configuration # NOTE: The configuration has a few required parameters. They are marked # with 'required'. Be sure to edit those to make this configuration work. 
-# OUTPUTS -[outputs] - -# Configuration for influxdb server to send metrics to -[outputs.influxdb] -# The full HTTP endpoint URL for your InfluxDB instance -url = "http://localhost:8086" # required. - -# The target database for metrics. This database must already exist -database = "telegraf" # required. - -# Connection timeout (for the connection with InfluxDB), formatted as a string. -# Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". -# If not provided, will default to 0 (no timeout) -# timeout = "5s" - -# username = "telegraf" -# password = "metricsmetricsmetricsmetrics" - -# Set the user agent for the POSTs (can be useful for log differentiation) -# user_agent = "telegraf" - # Tags can also be specified via a normal map, but only one form at a time: - -# [tags] -# dc = "us-east-1" } +[tags] + # dc = "us-east-1" # Configuration for telegraf itself -# [agent] -# interval = "10s" -# debug = false -# hostname = "prod3241" +[agent] + # interval = "10s" + # debug = false + # hostname = "prod3241" -# PLUGINS +############################################################################### +# OUTPUTS # +############################################################################### + +[outputs] +` + +var header2 = ` + +############################################################################### +# PLUGINS # +############################################################################### ` @@ -389,34 +375,52 @@ database = "telegraf" # required. 
func PrintSampleConfig() { fmt.Printf(header) - var names []string + // Print Outputs + var onames []string - for name := range plugins.Plugins { - names = append(names, name) + for oname := range outputs.Outputs { + onames = append(onames, oname) + } + sort.Strings(onames) + + for _, oname := range onames { + creator := outputs.Outputs[oname] + output := creator() + + fmt.Printf("\n# %s\n[outputs.%s]\n", output.Description(), oname) + + config := output.SampleConfig() + if config == "" { + fmt.Printf(" # no configuration\n\n") + } else { + fmt.Printf(config) + } } - sort.Strings(names) + fmt.Printf(header2) - for _, name := range names { - creator := plugins.Plugins[name] + // Print Plugins + var pnames []string + for pname := range plugins.Plugins { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + + for _, pname := range pnames { + creator := plugins.Plugins[pname] plugin := creator() - fmt.Printf("# %s\n[%s]\n", plugin.Description(), name) - - var config string - - config = strings.TrimSpace(plugin.SampleConfig()) + fmt.Printf("# %s\n[%s]\n", plugin.Description(), pname) + config := plugin.SampleConfig() if config == "" { - fmt.Printf(" # no configuration\n\n") + fmt.Printf(" # no configuration\n\n") } else { - fmt.Printf("\n") lines := strings.Split(config, "\n") for _, line := range lines { fmt.Printf("%s\n", line) } - fmt.Printf("\n") } } diff --git a/outputs/datadog/datadog.go b/outputs/datadog/datadog.go index b79d1c828..627ba125e 100644 --- a/outputs/datadog/datadog.go +++ b/outputs/datadog/datadog.go @@ -21,6 +21,14 @@ type Datadog struct { client *http.Client } +var sampleConfig = ` + # Datadog API key + apikey = "my-secret-key" # required. + + # Connection timeout. 
+ # timeout = "5s" +` + type TimeSeries struct { Series []*Metric `json:"series"` } @@ -91,6 +99,14 @@ func (d *Datadog) Write(bp client.BatchPoints) error { return nil } +func (d *Datadog) SampleConfig() string { + return sampleConfig +} + +func (d *Datadog) Description() string { + return "Configuration for DataDog API to send metrics to." +} + func (d *Datadog) authenticatedUrl() string { q := url.Values{ "api_key": []string{d.Apikey}, diff --git a/outputs/influxdb/influxdb.go b/outputs/influxdb/influxdb.go index dd6546928..5bb74b4e3 100644 --- a/outputs/influxdb/influxdb.go +++ b/outputs/influxdb/influxdb.go @@ -22,6 +22,25 @@ type InfluxDB struct { conn *client.Client } +var sampleConfig = ` + # The full HTTP endpoint URL for your InfluxDB instance + url = "http://localhost:8086" # required. + + # The target database for metrics. This database must already exist + database = "telegraf" # required. + + # Connection timeout (for the connection with InfluxDB), formatted as a string. + # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
+ # If not provided, will default to 0 (no timeout) + # timeout = "5s" + + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + # Set the user agent for the POSTs (can be useful for log differentiation) + # user_agent = "telegraf" +` + func (i *InfluxDB) Connect() error { u, err := url.Parse(i.URL) if err != nil { @@ -57,6 +76,14 @@ func (i *InfluxDB) Close() error { return nil } +func (i *InfluxDB) SampleConfig() string { + return sampleConfig +} + +func (i *InfluxDB) Description() string { + return "Configuration for influxdb server to send metrics to" +} + func (i *InfluxDB) Write(bp client.BatchPoints) error { bp.Database = i.Database if _, err := i.conn.Write(bp); err != nil { diff --git a/outputs/registry.go b/outputs/registry.go index a2f22f73b..92ce2b34e 100644 --- a/outputs/registry.go +++ b/outputs/registry.go @@ -7,6 +7,8 @@ import ( type Output interface { Connect() error Close() error + Description() string + SampleConfig() string Write(client.BatchPoints) error } From 434267898b9bb514b63b436c71f125947fdb604d Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 26 Aug 2015 09:21:39 -0600 Subject: [PATCH 022/125] Indent the toml config for readability --- CHANGELOG.md | 1 + config.go | 14 ++++------ plugins/disque/disque.go | 13 ++++----- plugins/elasticsearch/elasticsearch.go | 10 +++---- plugins/exec/exec.go | 12 ++++----- plugins/haproxy/haproxy.go | 14 +++++----- plugins/httpjson/httpjson.go | 30 ++++++++++----------- plugins/kafka_consumer/kafka_consumer.go | 17 ++++++------ plugins/leofs/leofs.go | 10 +++---- plugins/lustre2/lustre2.go | 11 ++++---- plugins/memcached/memcached.go | 11 ++++---- plugins/mongodb/mongodb.go | 13 ++++----- plugins/mysql/mysql.go | 17 ++++++------ plugins/nginx/nginx.go | 5 ++-- plugins/postgresql/postgresql.go | 34 ++++++++++++------------ plugins/prometheus/prometheus.go | 5 ++-- plugins/rabbitmq/rabbitmq.go | 16 +++++------ plugins/redis/redis.go | 13 ++++----- plugins/rethinkdb/rethinkdb.go | 
13 ++++----- plugins/system/cpu.go | 9 ++++--- plugins/system/net.go | 10 +++---- 21 files changed, 143 insertions(+), 135 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f669de0e..2dfa674eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space - [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag. - [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface +- Indent the toml config file for readability ### Bugfixes - [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing. diff --git a/config.go b/config.go index 5eb22ffe6..516fee1e5 100644 --- a/config.go +++ b/config.go @@ -387,11 +387,11 @@ func PrintSampleConfig() { creator := outputs.Outputs[oname] output := creator() - fmt.Printf("\n# %s\n[outputs.%s]\n", output.Description(), oname) + fmt.Printf("\n# %s\n[outputs.%s]", output.Description(), oname) config := output.SampleConfig() if config == "" { - fmt.Printf(" # no configuration\n\n") + fmt.Printf("\n # no configuration\n") } else { fmt.Printf(config) } @@ -411,17 +411,13 @@ func PrintSampleConfig() { creator := plugins.Plugins[pname] plugin := creator() - fmt.Printf("# %s\n[%s]\n", plugin.Description(), pname) + fmt.Printf("\n# %s\n[%s]", plugin.Description(), pname) config := plugin.SampleConfig() if config == "" { - fmt.Printf(" # no configuration\n\n") + fmt.Printf("\n # no configuration\n") } else { - lines := strings.Split(config, "\n") - for _, line := range lines { - fmt.Printf("%s\n", line) - } - fmt.Printf("\n") + fmt.Printf(config) } } } diff --git a/plugins/disque/disque.go b/plugins/disque/disque.go index 292e1b363..4e4e3b47f 100644 --- a/plugins/disque/disque.go +++ b/plugins/disque/disque.go @@ -21,12 +21,13 @@ type Disque struct { } var sampleConfig = ` -# An array of URI to gather stats about. 
Specify an ip or hostname -# with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, -# 10.0.0.1:10000, etc. -# -# If no servers are specified, then localhost is used as the host. -servers = ["localhost"]` + # An array of URI to gather stats about. Specify an ip or hostname + # with optional port and password. ie disque://localhost, disque://10.10.3.33:18832, + # 10.0.0.1:10000, etc. + # + # If no servers are specified, then localhost is used as the host. + servers = ["localhost"] +` func (r *Disque) SampleConfig() string { return sampleConfig diff --git a/plugins/elasticsearch/elasticsearch.go b/plugins/elasticsearch/elasticsearch.go index 5607532e6..2127e93b7 100644 --- a/plugins/elasticsearch/elasticsearch.go +++ b/plugins/elasticsearch/elasticsearch.go @@ -28,12 +28,12 @@ type node struct { } const sampleConfig = ` -# specify a list of one or more Elasticsearch servers -servers = ["http://localhost:9200"] + # specify a list of one or more Elasticsearch servers + servers = ["http://localhost:9200"] -# set local to false when you want to read the indices stats from all nodes -# within the cluster -local = true + # set local to false when you want to read the indices stats from all nodes + # within the cluster + local = true ` // Elasticsearch is a plugin to read stats from one or many Elasticsearch diff --git a/plugins/exec/exec.go b/plugins/exec/exec.go index 8ab68e518..6c340db6f 100644 --- a/plugins/exec/exec.go +++ b/plugins/exec/exec.go @@ -11,13 +11,13 @@ import ( ) const sampleConfig = ` -# specify commands via an array of tables -[[exec.commands]] -# the command to run -command = "/usr/bin/mycollector --foo=bar" + # specify commands via an array of tables + [[exec.commands]] + # the command to run + command = "/usr/bin/mycollector --foo=bar" -# name of the command (used as a prefix for measurements) -name = "mycollector" + # name of the command (used as a prefix for measurements) + name = "mycollector" ` type Command struct { 
diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index e09bfe5be..df03ecced 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -84,13 +84,13 @@ type haproxy struct { } var sampleConfig = ` -# An array of address to gather stats about. Specify an ip on hostname -# with optional port. ie localhost, 10.10.3.33:1936, etc. -# -# If no servers are specified, then default to 127.0.0.1:1936 -servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] -# Or you can also use local socket(not work yet) -# servers = ["socket:/run/haproxy/admin.sock"] + # An array of address to gather stats about. Specify an ip on hostname + # with optional port. ie localhost, 10.10.3.33:1936, etc. + # + # If no servers are specified, then default to 127.0.0.1:1936 + servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"] + # Or you can also use local socket(not work yet) + # servers = ["socket:/run/haproxy/admin.sock"] ` func (r *haproxy) SampleConfig() string { diff --git a/plugins/httpjson/httpjson.go b/plugins/httpjson/httpjson.go index 16b232f89..665622d46 100644 --- a/plugins/httpjson/httpjson.go +++ b/plugins/httpjson/httpjson.go @@ -46,25 +46,25 @@ func (c RealHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) { } var sampleConfig = ` -# Specify services via an array of tables -[[httpjson.services]] + # Specify services via an array of tables + [[httpjson.services]] - # a name for the service being polled - name = "webserver_stats" + # a name for the service being polled + name = "webserver_stats" - # URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] + # URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] - # HTTP method to use (case-sensitive) - method = "GET" + # HTTP method to use (case-sensitive) + method = "GET" - # HTTP parameters 
(all values must be strings) - [httpjson.services.parameters] - event_type = "cpu_spike" - threshold = "0.75" + # HTTP parameters (all values must be strings) + [httpjson.services.parameters] + event_type = "cpu_spike" + threshold = "0.75" ` func (h *HttpJson) SampleConfig() string { diff --git a/plugins/kafka_consumer/kafka_consumer.go b/plugins/kafka_consumer/kafka_consumer.go index b2836e1d8..53fc8d110 100644 --- a/plugins/kafka_consumer/kafka_consumer.go +++ b/plugins/kafka_consumer/kafka_consumer.go @@ -20,17 +20,18 @@ type Kafka struct { } var sampleConfig = ` -# topic to consume -topic = "topic_with_metrics" + # topic to consume + topic = "topic_with_metrics" -# the name of the consumer group -consumerGroupName = "telegraf_metrics_consumers" + # the name of the consumer group + consumerGroupName = "telegraf_metrics_consumers" -# an array of Zookeeper connection strings -zookeeperPeers = ["localhost:2181"] + # an array of Zookeeper connection strings + zookeeperPeers = ["localhost:2181"] -# Batch size of points sent to InfluxDB -batchSize = 1000` + # Batch size of points sent to InfluxDB + batchSize = 1000 +` func (k *Kafka) SampleConfig() string { return sampleConfig diff --git a/plugins/leofs/leofs.go b/plugins/leofs/leofs.go index da08c3d72..ce6bd37ac 100644 --- a/plugins/leofs/leofs.go +++ b/plugins/leofs/leofs.go @@ -131,11 +131,11 @@ var serverTypeMapping = map[string]ServerType{ } var sampleConfig = ` -# An array of URI to gather stats about LeoFS. -# Specify an ip or hostname with port. ie 127.0.0.1:4020 -# -# If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. -servers = ["127.0.0.1:4021"] + # An array of URI to gather stats about LeoFS. + # Specify an ip or hostname with port. ie 127.0.0.1:4020 + # + # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. 
+ servers = ["127.0.0.1:4021"] ` func (l *LeoFS) SampleConfig() string { diff --git a/plugins/lustre2/lustre2.go b/plugins/lustre2/lustre2.go index 95b6bdbf7..549e0fc3f 100644 --- a/plugins/lustre2/lustre2.go +++ b/plugins/lustre2/lustre2.go @@ -25,11 +25,12 @@ type Lustre2 struct { } var sampleConfig = ` -# An array of /proc globs to search for Lustre stats -# If not specified, the default will work on Lustre 2.5.x -# -# ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] -# mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"]` + # An array of /proc globs to search for Lustre stats + # If not specified, the default will work on Lustre 2.5.x + # + # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] + # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] +` /* The wanted fields would be a []string if not for the lines that start with read_bytes/write_bytes and contain diff --git a/plugins/memcached/memcached.go b/plugins/memcached/memcached.go index 2cf555c87..cbab91e85 100644 --- a/plugins/memcached/memcached.go +++ b/plugins/memcached/memcached.go @@ -17,11 +17,12 @@ type Memcached struct { } var sampleConfig = ` -# An array of address to gather stats about. Specify an ip on hostname -# with optional port. ie localhost, 10.0.0.1:11211, etc. -# -# If no servers are specified, then localhost is used as the host. -servers = ["localhost"]` + # An array of address to gather stats about. Specify an ip on hostname + # with optional port. ie localhost, 10.0.0.1:11211, etc. + # + # If no servers are specified, then localhost is used as the host. + servers = ["localhost"] +` var defaultTimeout = 5 * time.Second diff --git a/plugins/mongodb/mongodb.go b/plugins/mongodb/mongodb.go index 28bbe3af0..b26da7a4e 100644 --- a/plugins/mongodb/mongodb.go +++ b/plugins/mongodb/mongodb.go @@ -25,12 +25,13 @@ type Ssl struct { } var sampleConfig = ` -# An array of URI to gather stats about. 
Specify an ip or hostname -# with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, -# mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. -# -# If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port. -servers = ["127.0.0.1:27017"]` + # An array of URI to gather stats about. Specify an ip or hostname + # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, + # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. + # + # If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port. + servers = ["127.0.0.1:27017"] +` func (m *MongoDB) SampleConfig() string { return sampleConfig diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index 425f3ea98..1bc72ff2a 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -14,14 +14,15 @@ type Mysql struct { } var sampleConfig = ` -# specify servers via a url matching: -# [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] -# e.g. -# root:root@http://10.0.0.18/?tls=false -# root:passwd@tcp(127.0.0.1:3036)/ -# -# If no servers are specified, then localhost is used as the host. -servers = ["localhost"]` + # specify servers via a url matching: + # [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] + # e.g. + # root:root@http://10.0.0.18/?tls=false + # root:passwd@tcp(127.0.0.1:3036)/ + # + # If no servers are specified, then localhost is used as the host. + servers = ["localhost"] +` func (m *Mysql) SampleConfig() string { return sampleConfig diff --git a/plugins/nginx/nginx.go b/plugins/nginx/nginx.go index 00a7a174c..97f273b63 100644 --- a/plugins/nginx/nginx.go +++ b/plugins/nginx/nginx.go @@ -19,8 +19,9 @@ type Nginx struct { } var sampleConfig = ` -# An array of Nginx stub_status URI to gather stats. -urls = ["http://localhost/status"]` + # An array of Nginx stub_status URI to gather stats. 
+ urls = ["http://localhost/status"] +` func (n *Nginx) SampleConfig() string { return sampleConfig diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 9312c4aa4..1a467fee9 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -18,28 +18,28 @@ type Postgresql struct { } var sampleConfig = ` -# specify servers via an array of tables -[[postgresql.servers]] + # specify servers via an array of tables + [[postgresql.servers]] -# specify address via a url matching: -# postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full] -# or a simple string: -# host=localhost user=pqotest password=... sslmode=... -# -# All connection parameters are optional. By default, the host is localhost -# and the user is the currently running user. For localhost, we default -# to sslmode=disable as well. -# + # specify address via a url matching: + # postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full] + # or a simple string: + # host=localhost user=pqotest password=... sslmode=... + # + # All connection parameters are optional. By default, the host is localhost + # and the user is the currently running user. For localhost, we default + # to sslmode=disable as well. + # -address = "sslmode=disable" + address = "sslmode=disable" -# A list of databases to pull metrics about. If not specified, metrics for all -# databases are gathered. + # A list of databases to pull metrics about. If not specified, metrics for all + # databases are gathered. 
-# databases = ["app_production", "blah_testing"] + # databases = ["app_production", "blah_testing"] -# [[postgresql.servers]] -# address = "influx@remoteserver" + # [[postgresql.servers]] + # address = "influx@remoteserver" ` func (p *Postgresql) SampleConfig() string { diff --git a/plugins/prometheus/prometheus.go b/plugins/prometheus/prometheus.go index 4029e9932..9e8b964e7 100644 --- a/plugins/prometheus/prometheus.go +++ b/plugins/prometheus/prometheus.go @@ -17,8 +17,9 @@ type Prometheus struct { } var sampleConfig = ` -# An array of urls to scrape metrics from. -urls = ["http://localhost:9100/metrics"]` + # An array of urls to scrape metrics from. + urls = ["http://localhost:9100/metrics"] +` func (r *Prometheus) SampleConfig() string { return sampleConfig diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/rabbitmq/rabbitmq.go index cd5ec6dc2..55b4b0a95 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ b/plugins/rabbitmq/rabbitmq.go @@ -68,15 +68,15 @@ type Node struct { } var sampleConfig = ` -# Specify servers via an array of tables -[[rabbitmq.servers]] -# url = "http://localhost:15672" -# username = "guest" -# password = "guest" + # Specify servers via an array of tables + [[rabbitmq.servers]] + # url = "http://localhost:15672" + # username = "guest" + # password = "guest" -# A list of nodes to pull metrics about. If not specified, metrics for -# all nodes are gathered. -# nodes = ["rabbit@node1", "rabbit@node2"] + # A list of nodes to pull metrics about. If not specified, metrics for + # all nodes are gathered. + # nodes = ["rabbit@node1", "rabbit@node2"] ` func (r *RabbitMQ) SampleConfig() string { diff --git a/plugins/redis/redis.go b/plugins/redis/redis.go index d2f3dd374..013793f1b 100644 --- a/plugins/redis/redis.go +++ b/plugins/redis/redis.go @@ -21,12 +21,13 @@ type Redis struct { } var sampleConfig = ` -# An array of URI to gather stats about. Specify an ip or hostname -# with optional port add password. 
ie redis://localhost, redis://10.10.3.33:18832, -# 10.0.0.1:10000, etc. -# -# If no servers are specified, then localhost is used as the host. -servers = ["localhost"]` + # An array of URI to gather stats about. Specify an ip or hostname + # with optional port add password. ie redis://localhost, redis://10.10.3.33:18832, + # 10.0.0.1:10000, etc. + # + # If no servers are specified, then localhost is used as the host. + servers = ["localhost"] +` func (r *Redis) SampleConfig() string { return sampleConfig diff --git a/plugins/rethinkdb/rethinkdb.go b/plugins/rethinkdb/rethinkdb.go index 1c46a1f49..412145e1a 100644 --- a/plugins/rethinkdb/rethinkdb.go +++ b/plugins/rethinkdb/rethinkdb.go @@ -15,12 +15,13 @@ type RethinkDB struct { } var sampleConfig = ` -# An array of URI to gather stats about. Specify an ip or hostname -# with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105, -# rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc. -# -# If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port. -servers = ["127.0.0.1:28015"]` + # An array of URI to gather stats about. Specify an ip or hostname + # with optional port add password. ie rethinkdb://user:auth_key@10.10.3.30:28105, + # rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc. + # + # If no servers are specified, then 127.0.0.1 is used as the host and 28015 as the port. 
+ servers = ["127.0.0.1:28015"] +` func (r *RethinkDB) SampleConfig() string { return sampleConfig diff --git a/plugins/system/cpu.go b/plugins/system/cpu.go index 1096b8517..ea34d2bbb 100644 --- a/plugins/system/cpu.go +++ b/plugins/system/cpu.go @@ -26,10 +26,11 @@ func (_ *CPUStats) Description() string { } var sampleConfig = ` -# Whether to report per-cpu stats or not -percpu = true -# Whether to report total system cpu stats or not -totalcpu = true` + # Whether to report per-cpu stats or not + percpu = true + # Whether to report total system cpu stats or not + totalcpu = true +` func (_ *CPUStats) SampleConfig() string { return sampleConfig diff --git a/plugins/system/net.go b/plugins/system/net.go index 014b01ea9..8a8847968 100644 --- a/plugins/system/net.go +++ b/plugins/system/net.go @@ -19,11 +19,11 @@ func (_ *NetIOStats) Description() string { } var netSampleConfig = ` -# By default, telegraf gathers stats from any up interface (excluding loopback) -# Setting interfaces will tell it to gather these explicit interfaces, -# regardless of status. -# -# interfaces = ["eth0", ... ] + # By default, telegraf gathers stats from any up interface (excluding loopback) + # Setting interfaces will tell it to gather these explicit interfaces, + # regardless of status. + # + # interfaces = ["eth0", ... 
] ` func (_ *NetIOStats) SampleConfig() string { From d1f965ae30577bc6abc41c5544984dc5e91c08f5 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 26 Aug 2015 11:02:10 -0600 Subject: [PATCH 023/125] Kafka output producer, send telegraf metrics to Kafka brokers Closes #38 --- CHANGELOG.md | 1 + README.md | 31 +++++------ agent.go | 8 +++ cmd/telegraf/telegraf.go | 3 -- config.go | 2 +- outputs/all/all.go | 1 + outputs/datadog/datadog_test.go | 18 ++----- outputs/kafka/kafka.go | 91 +++++++++++++++++++++++++++++++++ outputs/kafka/kafka_test.go | 28 ++++++++++ testutil/testutil.go | 17 ++++++ 10 files changed, 167 insertions(+), 33 deletions(-) create mode 100644 outputs/kafka/kafka.go create mode 100644 outputs/kafka/kafka_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 2dfa674eb..71852ce96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v0.1.7 [unreleased] ### Features +- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output sink. - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. - [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space diff --git a/README.md b/README.md index bcf06d319..de84f2a8c 100644 --- a/README.md +++ b/README.md @@ -103,21 +103,22 @@ at 192.168.59.103:8086, tagging measurements with dc="denver-1". It will output measurements at a 10s interval and will collect totalcpu & percpu data. ``` -[outputs] -[outputs.influxdb] -url = "http://192.168.59.103:8086" # required. -database = "telegraf" # required. - [tags] -dc = "denver-1" + dc = "denver-1" [agent] -interval = "10s" + interval = "10s" + +# OUTPUTS +[outputs] +[outputs.influxdb] + url = "http://192.168.59.103:8086" # required. + database = "telegraf" # required. 
# PLUGINS [cpu] -percpu = true -totalcpu = true + percpu = true + totalcpu = true ``` Below is how to configure `tagpass` parameters (added in 0.1.4) @@ -125,15 +126,15 @@ Below is how to configure `tagpass` parameters (added in 0.1.4) ``` # Don't collect CPU data for cpu6 & cpu7 [cpu.tagdrop] -cpu = [ "cpu6", "cpu7" ] + cpu = [ "cpu6", "cpu7" ] [disk] [disk.tagpass] -# tagpass conditions are OR, not AND. -# If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) -# then the metric passes -fstype = [ "ext4", "xfs" ] -path = [ "/opt", "/home" ] + # tagpass conditions are OR, not AND. + # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) + # then the metric passes + fstype = [ "ext4", "xfs" ] + path = [ "/opt", "/home" ] ``` ## Supported Plugins diff --git a/agent.go b/agent.go index b35fae390..e5871b5b7 100644 --- a/agent.go +++ b/agent.go @@ -74,6 +74,9 @@ func (a *Agent) Connect() error { if err != nil { return err } + if a.Debug { + log.Printf("Successfully connected to output: %s\n", o.name) + } } return nil } @@ -160,6 +163,8 @@ func (a *Agent) LoadPlugins(pluginsFilter string) ([]string, error) { return names, nil } +// crankParallel runs the plugins that are using the same reporting interval +// as the telegraf agent. func (a *Agent) crankParallel() error { points := make(chan *BatchPoints, len(a.plugins)) @@ -203,6 +208,7 @@ func (a *Agent) crankParallel() error { return a.flush(&bp) } +// crank is mostly for test purposes. func (a *Agent) crank() error { var bp BatchPoints @@ -223,6 +229,8 @@ func (a *Agent) crank() error { return a.flush(&bp) } +// crankSeparate runs the plugins that have been configured with their own +// reporting interval. 
func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) error { ticker := time.NewTicker(plugin.config.Interval) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 38f323215..c7f863778 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -114,11 +114,8 @@ func main() { } shutdown := make(chan struct{}) - signals := make(chan os.Signal) - signal.Notify(signals, os.Interrupt) - go func() { <-signals close(shutdown) diff --git a/config.go b/config.go index 516fee1e5..19ebc00bf 100644 --- a/config.go +++ b/config.go @@ -356,6 +356,7 @@ var header = `# Telegraf configuration # debug = false # hostname = "prod3241" + ############################################################################### # OUTPUTS # ############################################################################### @@ -368,7 +369,6 @@ var header2 = ` ############################################################################### # PLUGINS # ############################################################################### - ` // PrintSampleConfig prints the sample config! 
diff --git a/outputs/all/all.go b/outputs/all/all.go index 0fb5f3723..36d11ea61 100644 --- a/outputs/all/all.go +++ b/outputs/all/all.go @@ -3,4 +3,5 @@ package all import ( _ "github.com/influxdb/telegraf/outputs/datadog" _ "github.com/influxdb/telegraf/outputs/influxdb" + _ "github.com/influxdb/telegraf/outputs/kafka" ) diff --git a/outputs/datadog/datadog_test.go b/outputs/datadog/datadog_test.go index 744afc99b..b5a7d3565 100644 --- a/outputs/datadog/datadog_test.go +++ b/outputs/datadog/datadog_test.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/influxdb/telegraf/testutil" + "github.com/influxdb/influxdb/client" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,18 +27,6 @@ func fakeDatadog() *Datadog { return d } -func testData() client.BatchPoints { - var bp client.BatchPoints - bp.Time = time.Now() - bp.Tags = map[string]string{"tag1": "value1"} - bp.Points = []client.Point{ - { - Fields: map[string]interface{}{"value": 1.0}, - }, - } - return bp -} - func TestUriOverride(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -48,7 +38,7 @@ func TestUriOverride(t *testing.T) { d.Apikey = "123456" err := d.Connect() require.NoError(t, err) - err = d.Write(testData()) + err = d.Write(testutil.MockBatchPoints()) require.NoError(t, err) } @@ -67,7 +57,7 @@ func TestBadStatusCode(t *testing.T) { d.Apikey = "123456" err := d.Connect() require.NoError(t, err) - err = d.Write(testData()) + err = d.Write(testutil.MockBatchPoints()) if err == nil { t.Errorf("error expected but none returned") } else { diff --git a/outputs/kafka/kafka.go b/outputs/kafka/kafka.go new file mode 100644 index 000000000..ac4d61164 --- /dev/null +++ b/outputs/kafka/kafka.go @@ -0,0 +1,91 @@ +package kafka + +import ( + "errors" + "fmt" + + "github.com/Shopify/sarama" + "github.com/influxdb/influxdb/client" + "github.com/influxdb/telegraf/outputs" +) + +type Kafka 
struct { + // Kafka brokers to send metrics to + Brokers []string + // Kafka topic + Topic string + + producer sarama.SyncProducer +} + +var sampleConfig = ` + # URLs of kafka brokers + brokers = ["localhost:9092"] + # Kafka topic for producer messages + topic = "telegraf" +` + +func (k *Kafka) Connect() error { + producer, err := sarama.NewSyncProducer(k.Brokers, nil) + if err != nil { + return err + } + k.producer = producer + return nil +} + +func (k *Kafka) Close() error { + return k.producer.Close() +} + +func (k *Kafka) SampleConfig() string { + return sampleConfig +} + +func (k *Kafka) Description() string { + return "Configuration for the Kafka server to send metrics to" +} + +func (k *Kafka) Write(bp client.BatchPoints) error { + if len(bp.Points) == 0 { + return nil + } + + for _, p := range bp.Points { + // Combine tags from Point and BatchPoints and grab the resulting + // line-protocol output string to write to Kafka + var value string + if p.Raw != "" { + value = p.Raw + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + value = p.MarshalString() + } + + m := &sarama.ProducerMessage{ + Topic: k.Topic, + Value: sarama.StringEncoder(value), + } + if h, ok := p.Tags["host"]; ok { + m.Key = sarama.StringEncoder(h) + } + + _, _, err := k.producer.SendMessage(m) + if err != nil { + return errors.New(fmt.Sprintf("FAILED to send kafka message: %s\n", + err)) + } + } + return nil +} + +func init() { + outputs.Add("kafka", func() outputs.Output { + return &Kafka{} + }) +} diff --git a/outputs/kafka/kafka_test.go b/outputs/kafka/kafka_test.go new file mode 100644 index 000000000..e97bf1bb5 --- /dev/null +++ b/outputs/kafka/kafka_test.go @@ -0,0 +1,28 @@ +package kafka + +import ( + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test 
in short mode") + } + + brokers := []string{testutil.GetLocalHost() + ":9092"} + k := &Kafka{ + Brokers: brokers, + Topic: "Test", + } + + // Verify that we can connect to the Kafka broker + err := k.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to the kafka broker + err = k.Write(testutil.MockBatchPoints()) + require.NoError(t, err) +} diff --git a/testutil/testutil.go b/testutil/testutil.go index 91eb4b6b9..79a7dd544 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -4,6 +4,9 @@ import ( "net" "net/url" "os" + "time" + + "github.com/influxdb/influxdb/client" ) var localhost = "localhost" @@ -27,3 +30,17 @@ func GetLocalHost() string { } return localhost } + +// MockBatchPoints returns a mock BatchPoints object for using in unit tests +// of telegraf output sinks. +func MockBatchPoints() client.BatchPoints { + var bp client.BatchPoints + bp.Time = time.Now() + bp.Tags = map[string]string{"tag1": "value1"} + bp.Points = []client.Point{ + { + Fields: map[string]interface{}{"value": 1.0}, + }, + } + return bp +} From 5b78b1e5484553ab42094181a2cbf2b2a11541d0 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 26 Aug 2015 17:43:09 -0600 Subject: [PATCH 024/125] Clean up agent error handling and logging of outputs/plugins Closes #145 --- CHANGELOG.md | 2 +- agent.go | 45 +++++++++++++++++++++++++-------------------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71852ce96..5db15d9dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ ## v0.1.7 [unreleased] ### Features -- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output sink. +- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer. - [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. 
- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space diff --git a/agent.go b/agent.go index e5871b5b7..e54b7e863 100644 --- a/agent.go +++ b/agent.go @@ -1,6 +1,7 @@ package telegraf import ( + "errors" "fmt" "log" "os" @@ -184,8 +185,7 @@ func (a *Agent) crankParallel() error { acc.Prefix = plugin.name + "_" acc.Config = plugin.config - err := plugin.plugin.Gather(&acc) - if err != nil { + if err := plugin.plugin.Gather(&acc); err != nil { log.Printf("Error in plugin [%s]: %s", plugin.name, err) } @@ -236,22 +236,27 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err for { var bp BatchPoints + var outerr error bp.Debug = a.Debug bp.Prefix = plugin.name + "_" bp.Config = plugin.config - err := plugin.plugin.Gather(&bp) - if err != nil { - return err + + if err := plugin.plugin.Gather(&bp); err != nil { + log.Printf("Error in plugin [%s]: %s", plugin.name, err) + outerr = errors.New("Error encountered processing plugins & outputs") } bp.Tags = a.Config.Tags bp.Time = time.Now() - err = a.flush(&bp) - if err != nil { - return err + if err := a.flush(&bp); err != nil { + outerr = errors.New("Error encountered processing plugins & outputs") + } + + if outerr != nil { + return outerr } select { @@ -266,16 +271,20 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err func (a *Agent) flush(bp *BatchPoints) error { var wg sync.WaitGroup var outerr error + for _, o := range a.outputs { wg.Add(1) go func(ro *runningOutput) { defer wg.Done() - outerr = ro.output.Write(bp.BatchPoints) + // Log all output errors: + if err := ro.output.Write(bp.BatchPoints); err != nil { + log.Printf("Error in output [%s]: %s", ro.name, err) + outerr = errors.New("Error encountered flushing outputs") + } }(o) } wg.Wait() - return outerr } @@ -301,8 +310,7 @@ func (a *Agent) TestAllPlugins() error { fmt.Printf("* Plugin: %s\n", name) acc.Prefix = name + "_" - err := plugin.Gather(&acc) 
- if err != nil { + if err := plugin.Gather(&acc); err != nil { return err } } @@ -326,8 +334,7 @@ func (a *Agent) Test() error { fmt.Printf("* Internal: %s\n", plugin.config.Interval) } - err := plugin.plugin.Gather(&acc) - if err != nil { + if err := plugin.plugin.Gather(&acc); err != nil { return err } } @@ -344,9 +351,8 @@ func (a *Agent) Run(shutdown chan struct{}) error { wg.Add(1) go func(plugin *runningPlugin) { defer wg.Done() - err := a.crankSeparate(shutdown, plugin) - if err != nil { - log.Printf("Error in plugin [%s]: %s", plugin.name, err) + if err := a.crankSeparate(shutdown, plugin); err != nil { + log.Printf(err.Error()) } }(plugin) } @@ -357,9 +363,8 @@ func (a *Agent) Run(shutdown chan struct{}) error { ticker := time.NewTicker(a.Interval.Duration) for { - err := a.crankParallel() - if err != nil { - log.Printf("Error in plugins: %s", err) + if err := a.crankParallel(); err != nil { + log.Printf(err.Error()) } select { From ff2de0c715fc1be72112a3d42f18eb16f621d222 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 27 Aug 2015 13:28:49 -0600 Subject: [PATCH 025/125] Only build the docker plugin on linux --- agent_test.go | 8 +-- plugins/system/docker.go | 2 + plugins/system/docker_test.go | 118 ++++++++++++++++++++++++++++++++++ plugins/system/system_test.go | 100 ---------------------------- testdata/telegraf-agent.toml | 4 -- 5 files changed, 124 insertions(+), 108 deletions(-) create mode 100644 plugins/system/docker_test.go diff --git a/agent_test.go b/agent_test.go index e65cb7ae9..913ce565f 100644 --- a/agent_test.go +++ b/agent_test.go @@ -30,16 +30,16 @@ func TestAgent_LoadPlugin(t *testing.T) { assert.Equal(t, 2, len(pluginsEnabled)) pluginsEnabled, _ = a.LoadPlugins("") - assert.Equal(t, 24, len(pluginsEnabled)) + assert.Equal(t, 23, len(pluginsEnabled)) pluginsEnabled, _ = a.LoadPlugins(" ") - assert.Equal(t, 24, len(pluginsEnabled)) + assert.Equal(t, 23, len(pluginsEnabled)) pluginsEnabled, _ = a.LoadPlugins(" ") - assert.Equal(t, 
24, len(pluginsEnabled)) + assert.Equal(t, 23, len(pluginsEnabled)) pluginsEnabled, _ = a.LoadPlugins("\n\t") - assert.Equal(t, 24, len(pluginsEnabled)) + assert.Equal(t, 23, len(pluginsEnabled)) } /* diff --git a/plugins/system/docker.go b/plugins/system/docker.go index 9eab7cf52..5e22dc88e 100644 --- a/plugins/system/docker.go +++ b/plugins/system/docker.go @@ -1,3 +1,5 @@ +// +build linux + package system import ( diff --git a/plugins/system/docker_test.go b/plugins/system/docker_test.go new file mode 100644 index 000000000..41dd2278b --- /dev/null +++ b/plugins/system/docker_test.go @@ -0,0 +1,118 @@ +// +build linux + +package system + +import ( + "testing" + + "github.com/influxdb/telegraf/plugins/system/ps/cpu" + "github.com/influxdb/telegraf/plugins/system/ps/docker" + "github.com/influxdb/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDockerStats_GenerateStats(t *testing.T) { + var mps MockPS + var acc testutil.Accumulator + + ds := &DockerContainerStat{ + Name: "blah", + CPU: &cpu.CPUTimesStat{ + CPU: "all", + User: 3.1, + System: 8.2, + Idle: 80.1, + Nice: 1.3, + Iowait: 0.2, + Irq: 0.1, + Softirq: 0.11, + Steal: 0.0001, + Guest: 8.1, + GuestNice: 0.324, + Stolen: 0.051, + }, + Mem: &docker.CgroupMemStat{ + ContainerID: "blah", + Cache: 1, + RSS: 2, + RSSHuge: 3, + MappedFile: 4, + Pgpgin: 5, + Pgpgout: 6, + Pgfault: 7, + Pgmajfault: 8, + InactiveAnon: 9, + ActiveAnon: 10, + InactiveFile: 11, + ActiveFile: 12, + Unevictable: 13, + HierarchicalMemoryLimit: 14, + TotalCache: 15, + TotalRSS: 16, + TotalRSSHuge: 17, + TotalMappedFile: 18, + TotalPgpgIn: 19, + TotalPgpgOut: 20, + TotalPgFault: 21, + TotalPgMajFault: 22, + TotalInactiveAnon: 23, + TotalActiveAnon: 24, + TotalInactiveFile: 25, + TotalActiveFile: 26, + TotalUnevictable: 27, + }, + } + + mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil) + + err := (&DockerStats{&mps}).Gather(&acc) + require.NoError(t, err) + + 
dockertags := map[string]string{ + "name": "blah", + "id": "", + "command": "", + } + + assert.True(t, acc.CheckTaggedValue("user", 3.1, dockertags)) + assert.True(t, acc.CheckTaggedValue("system", 8.2, dockertags)) + assert.True(t, acc.CheckTaggedValue("idle", 80.1, dockertags)) + assert.True(t, acc.CheckTaggedValue("nice", 1.3, dockertags)) + assert.True(t, acc.CheckTaggedValue("iowait", 0.2, dockertags)) + assert.True(t, acc.CheckTaggedValue("irq", 0.1, dockertags)) + assert.True(t, acc.CheckTaggedValue("softirq", 0.11, dockertags)) + assert.True(t, acc.CheckTaggedValue("steal", 0.0001, dockertags)) + assert.True(t, acc.CheckTaggedValue("guest", 8.1, dockertags)) + assert.True(t, acc.CheckTaggedValue("guestNice", 0.324, dockertags)) + assert.True(t, acc.CheckTaggedValue("stolen", 0.051, dockertags)) + + assert.True(t, acc.CheckTaggedValue("cache", uint64(1), dockertags)) + assert.True(t, acc.CheckTaggedValue("rss", uint64(2), dockertags)) + assert.True(t, acc.CheckTaggedValue("rss_huge", uint64(3), dockertags)) + assert.True(t, acc.CheckTaggedValue("mapped_file", uint64(4), dockertags)) + assert.True(t, acc.CheckTaggedValue("swap_in", uint64(5), dockertags)) + assert.True(t, acc.CheckTaggedValue("swap_out", uint64(6), dockertags)) + assert.True(t, acc.CheckTaggedValue("page_fault", uint64(7), dockertags)) + assert.True(t, acc.CheckTaggedValue("page_major_fault", uint64(8), dockertags)) + assert.True(t, acc.CheckTaggedValue("inactive_anon", uint64(9), dockertags)) + assert.True(t, acc.CheckTaggedValue("active_anon", uint64(10), dockertags)) + assert.True(t, acc.CheckTaggedValue("inactive_file", uint64(11), dockertags)) + assert.True(t, acc.CheckTaggedValue("active_file", uint64(12), dockertags)) + assert.True(t, acc.CheckTaggedValue("unevictable", uint64(13), dockertags)) + assert.True(t, acc.CheckTaggedValue("memory_limit", uint64(14), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_cache", uint64(15), dockertags)) + assert.True(t, 
acc.CheckTaggedValue("total_rss", uint64(16), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_rss_huge", uint64(17), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_mapped_file", uint64(18), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_swap_in", uint64(19), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_swap_out", uint64(20), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_page_fault", uint64(21), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_page_major_fault", uint64(22), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_inactive_anon", uint64(23), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_active_anon", uint64(24), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_inactive_file", uint64(25), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_active_file", uint64(26), dockertags)) + assert.True(t, acc.CheckTaggedValue("total_unevictable", uint64(27), dockertags)) +} diff --git a/plugins/system/system_test.go b/plugins/system/system_test.go index 88a35071a..78c13834b 100644 --- a/plugins/system/system_test.go +++ b/plugins/system/system_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdb/telegraf/plugins/system/ps/cpu" "github.com/influxdb/telegraf/plugins/system/ps/disk" - "github.com/influxdb/telegraf/plugins/system/ps/docker" "github.com/influxdb/telegraf/plugins/system/ps/load" "github.com/influxdb/telegraf/plugins/system/ps/mem" "github.com/influxdb/telegraf/plugins/system/ps/net" @@ -129,56 +128,6 @@ func TestSystemStats_GenerateStats(t *testing.T) { mps.On("SwapStat").Return(sms, nil) - ds := &DockerContainerStat{ - Name: "blah", - CPU: &cpu.CPUTimesStat{ - CPU: "all", - User: 3.1, - System: 8.2, - Idle: 80.1, - Nice: 1.3, - Iowait: 0.2, - Irq: 0.1, - Softirq: 0.11, - Steal: 0.0001, - Guest: 8.1, - GuestNice: 0.324, - Stolen: 0.051, - }, - Mem: &docker.CgroupMemStat{ - ContainerID: "blah", - Cache: 1, - RSS: 2, - RSSHuge: 3, - MappedFile: 4, - Pgpgin: 5, 
- Pgpgout: 6, - Pgfault: 7, - Pgmajfault: 8, - InactiveAnon: 9, - ActiveAnon: 10, - InactiveFile: 11, - ActiveFile: 12, - Unevictable: 13, - HierarchicalMemoryLimit: 14, - TotalCache: 15, - TotalRSS: 16, - TotalRSSHuge: 17, - TotalMappedFile: 18, - TotalPgpgIn: 19, - TotalPgpgOut: 20, - TotalPgFault: 21, - TotalPgMajFault: 22, - TotalInactiveAnon: 23, - TotalActiveAnon: 24, - TotalInactiveFile: 25, - TotalActiveFile: 26, - TotalUnevictable: 27, - }, - } - - mps.On("DockerStat").Return([]*DockerContainerStat{ds}, nil) - ss := &SystemStats{ps: &mps} err := ss.Gather(&acc) @@ -332,55 +281,6 @@ func TestSystemStats_GenerateStats(t *testing.T) { assert.NoError(t, acc.ValidateTaggedValue("free", uint64(6412), swaptags)) assert.NoError(t, acc.ValidateTaggedValue("in", uint64(7), swaptags)) assert.NoError(t, acc.ValidateTaggedValue("out", uint64(830), swaptags)) - - err = (&DockerStats{&mps}).Gather(&acc) - require.NoError(t, err) - - dockertags := map[string]string{ - "name": "blah", - "id": "", - "command": "", - } - - assert.True(t, acc.CheckTaggedValue("user", 3.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("system", 8.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("idle", 80.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("nice", 1.3, dockertags)) - assert.True(t, acc.CheckTaggedValue("iowait", 0.2, dockertags)) - assert.True(t, acc.CheckTaggedValue("irq", 0.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("softirq", 0.11, dockertags)) - assert.True(t, acc.CheckTaggedValue("steal", 0.0001, dockertags)) - assert.True(t, acc.CheckTaggedValue("guest", 8.1, dockertags)) - assert.True(t, acc.CheckTaggedValue("guestNice", 0.324, dockertags)) - assert.True(t, acc.CheckTaggedValue("stolen", 0.051, dockertags)) - - assert.True(t, acc.CheckTaggedValue("cache", uint64(1), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss", uint64(2), dockertags)) - assert.True(t, acc.CheckTaggedValue("rss_huge", uint64(3), dockertags)) - assert.True(t, 
acc.CheckTaggedValue("mapped_file", uint64(4), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_in", uint64(5), dockertags)) - assert.True(t, acc.CheckTaggedValue("swap_out", uint64(6), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_fault", uint64(7), dockertags)) - assert.True(t, acc.CheckTaggedValue("page_major_fault", uint64(8), dockertags)) - assert.True(t, acc.CheckTaggedValue("inactive_anon", uint64(9), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_anon", uint64(10), dockertags)) - assert.True(t, acc.CheckTaggedValue("inactive_file", uint64(11), dockertags)) - assert.True(t, acc.CheckTaggedValue("active_file", uint64(12), dockertags)) - assert.True(t, acc.CheckTaggedValue("unevictable", uint64(13), dockertags)) - assert.True(t, acc.CheckTaggedValue("memory_limit", uint64(14), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_cache", uint64(15), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss", uint64(16), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_rss_huge", uint64(17), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_mapped_file", uint64(18), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_in", uint64(19), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_swap_out", uint64(20), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_fault", uint64(21), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_page_major_fault", uint64(22), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_anon", uint64(23), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_anon", uint64(24), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_inactive_file", uint64(25), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_active_file", uint64(26), dockertags)) - assert.True(t, acc.CheckTaggedValue("total_unevictable", uint64(27), dockertags)) } // Asserts that a given accumulator contains a measurment of type float64 with diff 
--git a/testdata/telegraf-agent.toml b/testdata/telegraf-agent.toml index 98037dc6b..13c059983 100644 --- a/testdata/telegraf-agent.toml +++ b/testdata/telegraf-agent.toml @@ -74,10 +74,6 @@ percpu = false # If no servers are specified, then localhost is used as the host. servers = ["localhost"] -# Read metrics about docker containers -[docker] - # no configuration - # Read stats from one or more Elasticsearch servers or clusters [elasticsearch] From bdfd1aef62e3d2bc7f659cfb5e28eca9a4895f88 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 28 Aug 2015 10:18:46 -0600 Subject: [PATCH 026/125] Update README with 0.1.7 and make separate CONTRIBUTING file --- CONTRIBUTING.md | 130 +++++++++++++++++++++++++++++++++++++++++++++ README.md | 138 ++++++------------------------------------------ 2 files changed, 147 insertions(+), 121 deletions(-) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..c28e52131 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,130 @@ +## Sign the CLA + +Before we can merge a pull request, you will need to sign the CLA, +which can be found [on our website](http://influxdb.com/community/cla.html) + +## Plugins + +This section is for developers that want to create new collection plugins. +Telegraf is entirely plugin driven. This interface allows for operators to +pick and chose what is gathered as well as makes it easy for developers +to create new ways of generating metrics. + +Plugin authorship is kept as simple as possible to promote people to develop +and submit new plugins. + +### Plugin Guidelines + +* A plugin must conform to the `plugins.Plugin` interface. +* Telegraf promises to run each plugin's Gather function serially. This means +developers don't have to worry about thread safety within these functions. +* Each generated metric automatically has the name of the plugin that generated +it prepended. This is to keep plugins honest. 
+* Plugins should call `plugins.Add` in their `init` function to register themselves. +See below for a quick example. +* To be available within Telegraf itself, plugins must add themselves to the +`github.com/influxdb/telegraf/plugins/all/all.go` file. +* The `SampleConfig` function should return valid toml that describes how the +plugin can be configured. This is included in `telegraf -sample-config`. +* The `Description` function should say in one line what this plugin does. + +### Plugin interface + +```go +type Plugin interface { + SampleConfig() string + Description() string + Gather(Accumulator) error +} + +type Accumulator interface { + Add(measurement string, value interface{}, tags map[string]string) + AddValuesWithTime(measurement string, + values map[string]interface{}, + tags map[string]string, + timestamp time.Time) +} +``` + +### Accumulator + +The way that a plugin emits metrics is by interacting with the Accumulator. + +The `Add` function takes 3 arguments: +* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`. +* **value**: A value for the metric. This accepts 5 different types of value: + * **int**: The most common type. All int types are accepted but favor using `int64` + Useful for counters, etc. + * **float**: Favor `float64`, useful for gauges, percentages, etc. + * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc. + * **string**: Typically used to indicate a message, or some kind of freeform information. + * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`. +* **tags**: This is a map of strings to strings to describe the where or who +about the metric. For instance, the `net` plugin adds a tag named `"interface"` +set to the name of the network interface, like `"eth0"`. + +The `AddValuesWithTime` allows multiple values for a point to be passed. The values +used are the same type profile as **value** above.
The **timestamp** argument +allows a point to be registered as having occurred at an arbitrary time. + +Let's say you've written a plugin that emits metrics about processes on the current host. + +```go + +type Process struct { + CPUTime float64 + MemoryBytes int64 + PID int +} + +func Gather(acc plugins.Accumulator) error { + for _, process := range system.Processes() { + tags := map[string]string { + "pid": fmt.Sprintf("%d", process.Pid), + } + + acc.Add("cpu", process.CPUTime, tags) + acc.Add("memory", process.MemoryBytes, tags) + } +} +``` + +### Example + +```go +package simple + +// simple.go + +import "github.com/influxdb/telegraf/plugins" + +type Simple struct { + Ok bool +} + +func (s *Simple) Description() string { + return "a demo plugin" +} + +func (s *Simple) SampleConfig() string { + return "ok = true # indicate if everything is fine" +} + +func (s *Simple) Gather(acc plugins.Accumulator) error { + if s.Ok { + acc.Add("state", "pretty good", nil) + } else { + acc.Add("state", "not great", nil) + } + + return nil +} + +func init() { + plugins.Add("simple", func() plugins.Plugin { return &Simple{} }) +} +``` + +## Outputs + +TODO: this section will describe requirements for contributing an output diff --git a/README.md b/README.md index de84f2a8c..df499ae27 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ are some InfluxDB compatibility requirements: * InfluxDB 0.9.2 and prior requires Telegraf 0.1.4 Latest: -* http://get.influxdb.org/telegraf/telegraf_0.1.6_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.1.6-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.1.7_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.1.7-1.x86_64.rpm 0.1.4: * http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb @@ -166,135 +166,31 @@ Telegraf currently has support for collecting metrics from We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. 
-## Plugins +## Output options -This section is for developers that want to create new collection plugins. -Telegraf is entirely plugin driven. This interface allows for operators to -pick and chose what is gathered as well as makes it easy for developers -to create new ways of generating metrics. +Telegraf also supports specifying multiple output sinks to send data to, +configuring each output sink is different, but examples can be +found by running `telegraf -sample-config` -Plugin authorship is kept as simple as possible to promote people to develop -and submit new plugins. +## Supported Outputs -## Guidelines +* influxdb +* kafka +* datadog -* A plugin must conform to the `plugins.Plugin` interface. -* Telegraf promises to run each plugin's Gather function serially. This means -developers don't have to worry about thread safety within these functions. -* Each generated metric automatically has the name of the plugin that generated -it prepended. This is to keep plugins honest. -* Plugins should call `plugins.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdb/telegraf/plugins/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -plugin can be configured. This is include in `telegraf -sample-config`. -* The `Description` function should say in one line what this plugin does. +## Contributing -### Plugin interface - -```go -type Plugin interface { - SampleConfig() string - Description() string - Gather(Accumulator) error -} - -type Accumulator interface { - Add(measurement string, value interface{}, tags map[string]string) - AddValuesWithTime(measurement string, - values map[string]interface{}, - tags map[string]string, - timestamp time.Time) -} -``` - -### Accumulator - -The way that a plugin emits metrics is by interacting with the Accumulator. 
- -The `Add` function takes 3 arguments: -* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`. -* **value**: A value for the metric. This accepts 5 different types of value: - * **int**: The most common type. All int types are accepted but favor using `int64` - Useful for counters, etc. - * **float**: Favor `float64`, useful for gauges, percentages, etc. - * **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc. - * **string**: Typically used to indicate a message, or some kind of freeform information. - * **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`. -* **tags**: This is a map of strings to strings to describe the where or who -about the metric. For instance, the `net` plugin adds a tag named `"interface"` -set to the name of the network interface, like `"eth0"`. - -The `AddValuesWithTime` allows multiple values for a point to be passed. The values -used are the same type profile as **value** above. The **timestamp** argument -allows a point to be registered as having occurred at an arbitrary time. - -Let's say you've written a plugin that emits metrics about processes on the current host. 
- -```go - -type Process struct { - CPUTime float64 - MemoryBytes int64 - PID int -} - -func Gather(acc plugins.Accumulator) error { - for _, process := range system.Processes() { - tags := map[string]string { - "pid": fmt.Sprintf("%d", process.Pid), - } - - acc.Add("cpu", process.CPUTime, tags) - acc.Add("memory", process.MemoryBytes, tags) - } -} -``` - -### Example - -```go -package simple - -// simple.go - -import "github.com/influxdb/telegraf/plugins" - -type Simple struct { - Ok bool -} - -func (s *Simple) Description() string { - return "a demo plugin" -} - -func (s *Simple) SampleConfig() string { - return "ok = true # indicate if everything is fine" -} - -func (s *Simple) Gather(acc plugins.Accumulator) error { - if s.Ok { - acc.Add("state", "pretty good", nil) - } else { - acc.Add("state", "not great", nil) - } - - return nil -} - -func init() { - plugins.Add("simple", func() plugins.Plugin { return &Simple{} }) -} -``` +Please see the +[contributing guide](https://github.com/influxdb/telegraf/blob/master/CONTRIBUTING.md) +for details on contributing a plugin or output to Telegraf ## Testing -### Execute short tests: +### Execute short tests execute `make test-short` -### Execute long tests: +### Execute long tests As Telegraf collects metrics from several third-party services it becomes a difficult task to mock each service as some of them have complicated protocols @@ -314,7 +210,7 @@ instructions and `brew install docker-compose` - execute `make test` -### Unit test troubleshooting: +### Unit test troubleshooting Try cleaning up your test environment by executing `make test-cleanup` and re-running From d418a6e872b8787af331d57c0fcc403a05964710 Mon Sep 17 00:00:00 2001 From: Michael Desa Date: Fri, 28 Aug 2015 16:17:46 -0700 Subject: [PATCH 027/125] Add list of dependency licenses --- DEPENDENCY_LICENSES.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 DEPENDENCY_LICENSES.md diff --git a/DEPENDENCY_LICENSES.md 
b/DEPENDENCY_LICENSES.md new file mode 100644 index 000000000..c16a73007 --- /dev/null +++ b/DEPENDENCY_LICENSES.md @@ -0,0 +1,33 @@ +# List +- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE) +- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE) +- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE) +- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE) +- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE) +- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE) +- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) +- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/gonuts/go-shellquote (No License, but the project it was forked from https://github.com/kballard/go-shellquote is [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)). 
+- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE) +- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) +- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE) +- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md) +- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) +- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE) +- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE) +- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE) +- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) +- github.com/stretchr/objx [MIT LICENSE](github.com/stretchr/objx) +- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt) +- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE) +- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE) +- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE) + +- golang.org/x/crypto/blowfish +- golang.org/x/crypto/bcrypt From ab191e2b58d2d8560dc42a2f38d58443c51a7d25 Mon Sep 17 00:00:00 2001 From: Michael Desa Date: Fri, 28 Aug 2015 16:25:37 -0700 Subject: [PATCH 028/125] Rename DEPENDENCY_LICENSES LICENSE_OF_DEPENDENCIES Closes #155 Closes #154 --- DEPENDENCY_LICENSES.md => LICENSE_OF_DEPENDENCIES.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename DEPENDENCY_LICENSES.md => 
LICENSE_OF_DEPENDENCIES.md (100%) diff --git a/DEPENDENCY_LICENSES.md b/LICENSE_OF_DEPENDENCIES.md similarity index 100% rename from DEPENDENCY_LICENSES.md rename to LICENSE_OF_DEPENDENCIES.md From e2bc5d80c96eaeca42d855bb49332576a26041e3 Mon Sep 17 00:00:00 2001 From: Alexander Oleinik Date: Thu, 27 Aug 2015 09:24:26 +0000 Subject: [PATCH 029/125] Apache Plugin Closes #158 Fixes #132 --- CHANGELOG.md | 10 ++- plugins/all/all.go | 1 + plugins/apache/apache.go | 150 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+), 1 deletion(-) create mode 100644 plugins/apache/apache.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 5db15d9dc..d6f42cb6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,12 @@ -## v0.1.7 [unreleased] +## v0.1.8 [unreleased] + +### Features + +[#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin + +### Bugfixes + +## v0.1.7 [2015-08-28] ### Features - [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer. diff --git a/plugins/all/all.go b/plugins/all/all.go index 8670c3d8b..9400a77a1 100644 --- a/plugins/all/all.go +++ b/plugins/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdb/telegraf/plugins/apache" _ "github.com/influxdb/telegraf/plugins/disque" _ "github.com/influxdb/telegraf/plugins/elasticsearch" _ "github.com/influxdb/telegraf/plugins/exec" diff --git a/plugins/apache/apache.go b/plugins/apache/apache.go new file mode 100644 index 000000000..307f84907 --- /dev/null +++ b/plugins/apache/apache.go @@ -0,0 +1,150 @@ +package apache + +import ( + "bufio" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdb/telegraf/plugins" +) + +type Apache struct { + Urls []string +} + +var sampleConfig = ` +# An array of Apache status URI to gather stats. 
+urls = ["http://localhost/server-status?auto"] +` + +func (n *Apache) SampleConfig() string { + return sampleConfig +} + +func (n *Apache) Description() string { + return "Read Apache status information (mod_status)" +} + +func (n *Apache) Gather(acc plugins.Accumulator) error { + var wg sync.WaitGroup + var outerr error + + for _, u := range n.Urls { + addr, err := url.Parse(u) + if err != nil { + return fmt.Errorf("Unable to parse address '%s': %s", u, err) + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + outerr = n.gatherUrl(addr, acc) + }(addr) + } + + wg.Wait() + + return outerr +} + +var tr = &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), +} + +var client = &http.Client{Transport: tr} + +func (n *Apache) gatherUrl(addr *url.URL, acc plugins.Accumulator) error { + resp, err := client.Get(addr.String()) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + + tags := getTags(addr) + + sc := bufio.NewScanner(resp.Body) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, ":") { + + parts := strings.SplitN(line, ":", 2) + key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1]) + + switch key { + + case "Scoreboard": + n.gatherScores(part, acc, tags) + default: + value, err := strconv.ParseFloat(part, 32) + if err != nil { + continue + } + acc.Add(key, value, tags) + } + } + } + + return nil +} + +func (n *Apache) gatherScores(data string, acc plugins.Accumulator, tags map[string]string) { + + var waiting, open int = 0, 0 + var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 + + for _, s := range strings.Split(data, "") { + + switch s { + case "_": waiting++ + case "S": S++ + case "R": R++ + case "W": W++ + case "K": K++ + case "D": D++ + case "C": C++ + case "L": L++ + case "G": 
G++ + case "I": I++ + case ".": open++ + } + } + + acc.Add("scboard_waiting", float64(waiting), tags); + acc.Add("scboard_starting", float64(S), tags); + acc.Add("scboard_reading", float64(R), tags); + acc.Add("scboard_sending", float64(W), tags); + acc.Add("scboard_keepalive", float64(K), tags); + acc.Add("scboard_dnslookup", float64(D), tags); + acc.Add("scboard_closing", float64(C), tags); + acc.Add("scboard_logging", float64(L), tags); + acc.Add("scboard_finishing", float64(G), tags); + acc.Add("scboard_idle_cleanup", float64(I), tags); + acc.Add("scboard_open", float64(open), tags); +} + +// Get tag(s) for the apache plugin +func getTags(addr *url.URL) map[string]string { + h := addr.Host + var htag string + if host, _, err := net.SplitHostPort(h); err == nil { + htag = host + } else { + htag = h + } + return map[string]string{"server": htag} +} + +func init() { + plugins.Add("apache", func() plugins.Plugin { + return &Apache{} + }) +} From 9969c4e810998a8084153ff76769fbff85f7f255 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 31 Aug 2015 14:03:38 -0600 Subject: [PATCH 030/125] Add system uptime metric, string formatted AND in float64 closes #150 --- CHANGELOG.md | 3 +- Godeps/Godeps.json | 7 +- .../cloudfoundry/gosigar/.gitignore | 1 + .../cloudfoundry/gosigar/.travis.yml | 8 + .../github.com/cloudfoundry/gosigar/LICENSE | 201 ++++++++ .../github.com/cloudfoundry/gosigar/NOTICE | 9 + .../github.com/cloudfoundry/gosigar/README.md | 22 + .../cloudfoundry/gosigar/Vagrantfile | 25 + .../cloudfoundry/gosigar/concrete_sigar.go | 69 +++ .../gosigar/concrete_sigar_test.go | 85 ++++ .../cloudfoundry/gosigar/examples/cputimes.go | 52 ++ .../cloudfoundry/gosigar/examples/df.go | 39 ++ .../cloudfoundry/gosigar/examples/free.go | 33 ++ .../cloudfoundry/gosigar/examples/ps.go | 37 ++ .../cloudfoundry/gosigar/examples/uptime.go | 27 + .../cloudfoundry/gosigar/fakes/fake_sigar.go | 72 +++ .../cloudfoundry/gosigar/psnotify/README.md | 50 ++ 
.../cloudfoundry/gosigar/psnotify/psnotify.go | 136 +++++ .../gosigar/psnotify/psnotify_bsd.go | 93 ++++ .../gosigar/psnotify/psnotify_linux.go | 253 ++++++++++ .../gosigar/psnotify/psnotify_test.go | 283 +++++++++++ .../cloudfoundry/gosigar/sigar_darwin.go | 467 ++++++++++++++++++ .../cloudfoundry/gosigar/sigar_format.go | 126 +++++ .../cloudfoundry/gosigar/sigar_interface.go | 141 ++++++ .../gosigar/sigar_interface_test.go | 135 +++++ .../cloudfoundry/gosigar/sigar_linux.go | 386 +++++++++++++++ .../cloudfoundry/gosigar/sigar_linux_test.go | 225 +++++++++ .../cloudfoundry/gosigar/sigar_suite_test.go | 13 + .../cloudfoundry/gosigar/sigar_unix.go | 26 + .../cloudfoundry/gosigar/sigar_util.go | 22 + .../cloudfoundry/gosigar/sigar_windows.go | 100 ++++ .../gosigar/sigar_windows_test.go | 32 ++ Makefile | 2 +- plugins/system/ps.go | 6 - plugins/system/system.go | 35 +- plugins/system/system_test.go | 20 +- 36 files changed, 3200 insertions(+), 41 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.gitignore create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/LICENSE create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/NOTICE create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/README.md create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/Vagrantfile create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar_test.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/cputimes.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/df.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/free.go create mode 100644 
Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/ps.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/uptime.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/fakes/fake_sigar.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/README.md create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_bsd.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_linux.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_test.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_darwin.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_format.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface_test.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_unix.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_util.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows.go create mode 100644 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index d6f42cb6a..940df2cab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,8 @@ ### Features -[#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin 
+[#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin +[#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 ### Bugfixes diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 9400bc682..8ffab7faa 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/influxdb/telegraf", - "GoVersion": "go1.4.2", + "GoVersion": "go1.5", "Packages": [ "./..." ], @@ -28,6 +28,11 @@ "ImportPath": "github.com/cenkalti/backoff", "Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99" }, + { + "ImportPath": "github.com/cloudfoundry/gosigar", + "Comment": "scotty_09012012-27-g3ed7c74", + "Rev": "3ed7c74352dae6dc00bdc8c74045375352e3ec05" + }, { "ImportPath": "github.com/dancannon/gorethink/encoding", "Comment": "v1.x.x-1-g786f12a", diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.gitignore b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.gitignore new file mode 100644 index 000000000..8000dd9db --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.travis.yml b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.travis.yml new file mode 100644 index 000000000..2a9c5d0c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: + - 'go install github.com/onsi/ginkgo/ginkgo' +script: 'ginkgo -r' diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/LICENSE b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/LICENSE new file mode 100644 index 000000000..11069edd7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/NOTICE b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/NOTICE new file mode 100644 index 000000000..fda553b5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/README.md b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/README.md new file mode 100644 index 000000000..90d51f9b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/Vagrantfile b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/Vagrantfile new file mode 100644 index 000000000..6fd990c14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/Vagrantfile @@ -0,0 +1,25 @@ +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" + config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" + install_go = <<-BASH + set -e + +if [ ! 
-d "/usr/local/go" ]; then + cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz + cd /usr/local + tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz + echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc +fi +export GOPATH=/home/vagrant/go +export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin +/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo +/usr/local/go/bin/go get -u github.com/onsi/gomega; +BASH + config.vm.provision "shell", inline: 'apt-get install -y git-core' + config.vm.provision "shell", inline: install_go +end diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar.go new file mode 100644 index 000000000..0e80aa4b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. 
+ cpuUsage.Get() + samplesCh <- cpuUsage + + ticker := time.NewTicker(collectionInterval) + + for { + select { + case <-ticker.C: + previousCpuUsage := cpuUsage + + cpuUsage.Get() + + select { + case samplesCh <- cpuUsage.Delta(previousCpuUsage): + default: + // Include default to avoid channel blocking + } + + case <-stopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { + l := LoadAverage{} + err := l.Get() + return l, err +} + +func (c *ConcreteSigar) GetMem() (Mem, error) { + m := Mem{} + err := m.Get() + return m, err +} + +func (c *ConcreteSigar) GetSwap() (Swap, error) { + s := Swap{} + err := s.Get() + return s, err +} + +func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { + f := FileSystemUsage{} + err := f.Get(path) + return f, err +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar_test.go new file mode 100644 index 000000000..ec51811c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/concrete_sigar_test.go @@ -0,0 +1,85 @@ +package sigar_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("ConcreteSigar", func() { + var concreteSigar *sigar.ConcreteSigar + + BeforeEach(func() { + concreteSigar = &sigar.ConcreteSigar{} + }) + + Describe("CollectCpuStats", func() { + It("immediately makes first CPU usage available even though it's not very accurate", func() { + samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + firstValue := <-samplesCh + Expect(firstValue.User).To(BeNumerically(">", 0)) + + stop <- struct{}{} + }) + + It("makes CPU usage delta values available", func() { + samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + firstValue := <-samplesCh + + secondValue := <-samplesCh + Expect(secondValue.User).To(BeNumerically("<", firstValue.User)) + + stop <- struct{}{} + }) + + It("does not block", func() { + _, stop := concreteSigar.CollectCpuStats(10 * time.Millisecond) + + // Sleep long enough for samplesCh to fill at least 2 values + time.Sleep(20 * time.Millisecond) + + stop <- struct{}{} + + // If CollectCpuStats blocks it will never get here + Expect(true).To(BeTrue()) + }) + }) + + It("GetLoadAverage", func() { + avg, err := concreteSigar.GetLoadAverage() + Expect(avg.One).ToNot(BeNil()) + Expect(avg.Five).ToNot(BeNil()) + Expect(avg.Fifteen).ToNot(BeNil()) + + Expect(err).ToNot(HaveOccurred()) + }) + + It("GetMem", func() { + mem, err := concreteSigar.GetMem() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically(">", 0)) + Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total)) + }) + + It("GetSwap", func() { + swap, err := concreteSigar.GetSwap() + Expect(err).ToNot(HaveOccurred()) + Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total)) + }) + + It("GetSwap", func() { + fsusage, err := concreteSigar.GetFileSystemUsage("/") + Expect(err).ToNot(HaveOccurred()) + Expect(fsusage.Total).ToNot(BeNil()) + + fsusage, err = concreteSigar.GetFileSystemUsage("T O T A L L 
Y B O G U S") + Expect(err).To(HaveOccurred()) + Expect(fsusage.Total).To(Equal(uint64(0))) + }) +}) diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/cputimes.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/cputimes.go new file mode 100644 index 000000000..cdfcd2f11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/cputimes.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "time" + + "github.com/cloudfoundry/gosigar" +) + +func main() { + cpus := sigar.CpuList{} + cpus.Get() + tcpu := getOverallCpu(cpus) + + for i, cpu := range cpus.List { + fmt.Printf("CPU%d Ticks: %d\n", i, cpu.Total()) + } + + fmt.Printf("Total CPU Ticks: %d\n", tcpu.Total()) + fmt.Printf("Total CPU Time: %d\n", tcpu.Total()/128) + fmt.Printf("User CPU Time: %d\n", tcpu.User/128) + + time.Sleep(1 * time.Second) + tcpu2 := sigar.Cpu{} + tcpu2.Get() + + dcpu := tcpu2.Delta(tcpu) + tcpuDelta := tcpu2.Total() - tcpu.Total() + iPercentage := 100.0 * float64(dcpu.Idle) / float64(tcpuDelta) + fmt.Printf("Idle percentage: %f\n", iPercentage) + bPercentage := 100.0 * float64(busy(tcpu2)-busy(tcpu)) / float64(tcpuDelta) + fmt.Printf("Busy percentage: %f\n", bPercentage) +} + +func busy(c sigar.Cpu) uint64 { + return c.Total() - c.Idle +} + +func getOverallCpu(cl sigar.CpuList) sigar.Cpu { + var overallCpu sigar.Cpu + for _, c := range cl.List { + overallCpu.User += c.User + overallCpu.Nice += c.Nice + overallCpu.Sys += c.Sys + overallCpu.Idle += c.Idle + overallCpu.Wait += c.Wait + overallCpu.Irq += c.Irq + overallCpu.SoftIrq += c.SoftIrq + overallCpu.Stolen += c.Stolen + } + return overallCpu +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/df.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/df.go new file mode 100644 index 000000000..96c92f41d --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/df.go @@ -0,0 +1,39 @@ +// Copyright (c) 2012 
VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" +) + +const output_format = "%-15s %4s %4s %5s %4s %-15s\n" + +func formatSize(size uint64) string { + return sigar.FormatSize(size * 1024) +} + +func main() { + fslist := sigar.FileSystemList{} + fslist.Get() + + fmt.Fprintf(os.Stdout, output_format, + "Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on") + + for _, fs := range fslist.List { + dir_name := fs.DirName + + usage := sigar.FileSystemUsage{} + + usage.Get(dir_name) + + fmt.Fprintf(os.Stdout, output_format, + fs.DevName, + formatSize(usage.Total), + formatSize(usage.Used), + formatSize(usage.Avail), + sigar.FormatPercent(usage.UsePercent()), + dir_name) + } +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/free.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/free.go new file mode 100644 index 000000000..9bf9d3db3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/free.go @@ -0,0 +1,33 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" +) + +func format(val uint64) uint64 { + return val / 1024 +} + +func main() { + mem := sigar.Mem{} + swap := sigar.Swap{} + + mem.Get() + swap.Get() + + fmt.Fprintf(os.Stdout, "%18s %10s %10s\n", + "total", "used", "free") + + fmt.Fprintf(os.Stdout, "Mem: %10d %10d %10d\n", + format(mem.Total), format(mem.Used), format(mem.Free)) + + fmt.Fprintf(os.Stdout, "-/+ buffers/cache: %10d %10d\n", + format(mem.ActualUsed), format(mem.ActualFree)) + + fmt.Fprintf(os.Stdout, "Swap: %10d %10d %10d\n", + format(swap.Total), format(swap.Used), format(swap.Free)) +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/ps.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/ps.go new file mode 100644 index 000000000..e3cc2281f --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/ps.go @@ -0,0 +1,37 @@ +// Copyright (c) 2012 VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" +) + +func main() { + pids := sigar.ProcList{} + pids.Get() + + // ps -eo pid,ppid,stime,time,rss,state,comm + fmt.Print(" PID PPID STIME TIME RSS S COMMAND\n") + + for _, pid := range pids.List { + state := sigar.ProcState{} + mem := sigar.ProcMem{} + time := sigar.ProcTime{} + + if err := state.Get(pid); err != nil { + continue + } + if err := mem.Get(pid); err != nil { + continue + } + if err := time.Get(pid); err != nil { + continue + } + + fmt.Printf("%5d %5d %s %s %6d %c %s\n", + pid, state.Ppid, + time.FormatStartTime(), time.FormatTotal(), + mem.Resident/1024, state.State, state.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/uptime.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/uptime.go new file mode 100644 index 000000000..337a9b01a --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/examples/uptime.go @@ -0,0 +1,27 @@ +// Copyright (c) 
2012 VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" + "time" +) + +func main() { + concreteSigar := sigar.ConcreteSigar{} + + uptime := sigar.Uptime{} + uptime.Get() + avg, err := concreteSigar.GetLoadAverage() + if err != nil { + fmt.Printf("Failed to get load average") + return + } + + fmt.Fprintf(os.Stdout, " %s up %s load average: %.2f, %.2f, %.2f\n", + time.Now().Format("15:04:05"), + uptime.Format(), + avg.One, avg.Five, avg.Fifteen) +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/fakes/fake_sigar.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/fakes/fake_sigar.go new file mode 100644 index 000000000..6fb77417c --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/fakes/fake_sigar.go @@ -0,0 +1,72 @@ +package fakes + +import ( + "time" + + sigar "github.com/cloudfoundry/gosigar" +) + +type FakeSigar struct { + LoadAverage sigar.LoadAverage + LoadAverageErr error + + Mem sigar.Mem + MemErr error + + Swap sigar.Swap + SwapErr error + + FileSystemUsage sigar.FileSystemUsage + FileSystemUsageErr error + FileSystemUsagePath string + + CollectCpuStatsCpuCh chan sigar.Cpu + CollectCpuStatsStopCh chan struct{} +} + +func NewFakeSigar() *FakeSigar { + return &FakeSigar{ + CollectCpuStatsCpuCh: make(chan sigar.Cpu, 1), + CollectCpuStatsStopCh: make(chan struct{}), + } +} + +func (f *FakeSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan sigar.Cpu, chan<- struct{}) { + samplesCh := make(chan sigar.Cpu, 1) + stopCh := make(chan struct{}) + + go func() { + for { + select { + case cpuStat := <-f.CollectCpuStatsCpuCh: + select { + case samplesCh <- cpuStat: + default: + // Include default to avoid channel blocking + } + + case <-f.CollectCpuStatsStopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (f *FakeSigar) GetLoadAverage() (sigar.LoadAverage, error) { + return f.LoadAverage, f.LoadAverageErr +} + +func (f *FakeSigar) GetMem() (sigar.Mem, 
error) { + return f.Mem, f.MemErr +} + +func (f *FakeSigar) GetSwap() (sigar.Swap, error) { + return f.Swap, f.SwapErr +} + +func (f *FakeSigar) GetFileSystemUsage(path string) (sigar.FileSystemUsage, error) { + f.FileSystemUsagePath = path + return f.FileSystemUsage, f.FileSystemUsageErr +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/README.md b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/README.md new file mode 100644 index 000000000..dd34ebcfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/README.md @@ -0,0 +1,50 @@ +# Process notifications for Go + +## Overview + +The psnotify package captures process events from the kernel via +kqueue on Darwin/BSD and the netlink connector on Linux. + +The psnotify API is similar to the +[fsnotify](https://github.com/howeyc/fsnotify) package. + +Example: +```go + watcher, err := psnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + + // Process events + go func() { + for { + select { + case ev := <-watcher.Fork: + log.Println("fork event:", ev) + case ev := <-watcher.Exec: + log.Println("exec event:", ev) + case ev := <-watcher.Exit: + log.Println("exit event:", ev) + case err := <-watcher.Error: + log.Println("error:", err) + } + } + }() + + err = watcher.Watch(os.Getpid(), psnotify.PROC_EVENT_ALL) + if err != nil { + log.Fatal(err) + } + + /* ... do stuff ... */ + watcher.Close() +``` + +## Supported platforms + +Currently targeting modern flavors of Darwin and Linux. +Should work on BSD, but untested. + +## License + +Apache 2.0 diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify.go new file mode 100644 index 000000000..6a69f4de2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify.go @@ -0,0 +1,136 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package psnotify + +import ( + "errors" + "fmt" +) + +type ProcEventFork struct { + ParentPid int // Pid of the process that called fork() + ChildPid int // Child process pid created by fork() +} + +type ProcEventExec struct { + Pid int // Pid of the process that called exec() +} + +type ProcEventExit struct { + Pid int // Pid of the process that called exit() +} + +type watch struct { + flags uint32 // Saved value of Watch() flags param +} + +type eventListener interface { + close() error // Watch.Close() closes the OS specific listener +} + +type Watcher struct { + listener eventListener // OS specifics (kqueue or netlink) + watches map[int]*watch // Map of watched process ids + Error chan error // Errors are sent on this channel + Fork chan *ProcEventFork // Fork events are sent on this channel + Exec chan *ProcEventExec // Exec events are sent on this channel + Exit chan *ProcEventExit // Exit events are sent on this channel + done chan bool // Used to stop the readEvents() goroutine + isClosed bool // Set to true when Close() is first called +} + +// Initialize event listener and channels +func NewWatcher() (*Watcher, error) { + listener, err := createListener() + + if err != nil { + return nil, err + } + + w := &Watcher{ + listener: listener, + watches: make(map[int]*watch), + Fork: make(chan *ProcEventFork), + Exec: make(chan *ProcEventExec), + Exit: make(chan *ProcEventExit), + Error: make(chan error), + done: make(chan bool, 1), + } + + go w.readEvents() + return w, nil +} + +// Close event channels when done message is received +func (w *Watcher) finish() { + close(w.Fork) + close(w.Exec) + close(w.Exit) + close(w.Error) +} + +// Closes the OS specific event listener, +// removes all watches and closes all event channels. 
+func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + for pid := range w.watches { + w.RemoveWatch(pid) + } + + w.done <- true + + w.listener.close() + + return nil +} + +// Add pid to the watched process set. +// The flags param is a bitmask of process events to capture, +// must be one or more of: PROC_EVENT_FORK, PROC_EVENT_EXEC, PROC_EVENT_EXIT +func (w *Watcher) Watch(pid int, flags uint32) error { + if w.isClosed { + return errors.New("psnotify watcher is closed") + } + + watchEntry, found := w.watches[pid] + + if found { + watchEntry.flags |= flags + } else { + if err := w.register(pid, flags); err != nil { + return err + } + w.watches[pid] = &watch{flags: flags} + } + + return nil +} + +// Remove pid from the watched process set. +func (w *Watcher) RemoveWatch(pid int) error { + _, ok := w.watches[pid] + if !ok { + msg := fmt.Sprintf("watch for pid=%d does not exist", pid) + return errors.New(msg) + } + delete(w.watches, pid) + return w.unregister(pid) +} + +// Internal helper to check if there is a message on the "done" channel. +// The "done" message is sent by the Close() method; when received here, +// the Watcher.finish method is called to close all channels and return +// true - in which case the caller should break from the readEvents loop. +func (w *Watcher) isDone() bool { + var done bool + select { + case done = <-w.done: + w.finish() + default: + } + return done +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_bsd.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_bsd.go new file mode 100644 index 000000000..e147d7638 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_bsd.go @@ -0,0 +1,93 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd netbsd openbsd + +// Go interface to BSD kqueue process events. 
+package psnotify + +import ( + "syscall" +) + +const ( + // Flags (from ) + PROC_EVENT_FORK = syscall.NOTE_FORK // fork() events + PROC_EVENT_EXEC = syscall.NOTE_EXEC // exec() events + PROC_EVENT_EXIT = syscall.NOTE_EXIT // exit() events + + // Watch for all process events + PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT +) + +type kqueueListener struct { + kq int // The syscall.Kqueue() file descriptor + buf [1]syscall.Kevent_t // An event buffer for Add/Remove watch +} + +// Initialize bsd implementation of the eventListener interface +func createListener() (eventListener, error) { + listener := &kqueueListener{} + kq, err := syscall.Kqueue() + listener.kq = kq + return listener, err +} + +// Initialize Kevent_t fields and propagate changelist for the given pid +func (w *Watcher) kevent(pid int, fflags uint32, flags int) error { + listener, _ := w.listener.(*kqueueListener) + event := &listener.buf[0] + + syscall.SetKevent(event, pid, syscall.EVFILT_PROC, flags) + event.Fflags = fflags + + _, err := syscall.Kevent(listener.kq, listener.buf[:], nil, nil) + + return err +} + +// Delete filter for given pid from the queue +func (w *Watcher) unregister(pid int) error { + return w.kevent(pid, 0, syscall.EV_DELETE) +} + +// Add and enable filter for given pid in the queue +func (w *Watcher) register(pid int, flags uint32) error { + return w.kevent(pid, flags, syscall.EV_ADD|syscall.EV_ENABLE) +} + +// Poll the kqueue file descriptor and dispatch to the Event channels +func (w *Watcher) readEvents() { + listener, _ := w.listener.(*kqueueListener) + events := make([]syscall.Kevent_t, 10) + + for { + if w.isDone() { + return + } + + n, err := syscall.Kevent(listener.kq, nil, events, nil) + if err != nil { + w.Error <- err + continue + } + + for _, ev := range events[:n] { + pid := int(ev.Ident) + + switch ev.Fflags { + case syscall.NOTE_FORK: + w.Fork <- &ProcEventFork{ParentPid: pid} + case syscall.NOTE_EXEC: + w.Exec <- &ProcEventExec{Pid: pid} + 
case syscall.NOTE_EXIT: + w.RemoveWatch(pid) + w.Exit <- &ProcEventExit{Pid: pid} + } + } + } +} + +// Close our kqueue file descriptor; deletes any remaining filters +func (listener *kqueueListener) close() error { + return syscall.Close(listener.kq) +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_linux.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_linux.go new file mode 100644 index 000000000..f9154ef3d --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_linux.go @@ -0,0 +1,253 @@ +// Copyright (c) 2012 VMware, Inc. + +// Go interface to the Linux netlink process connector. +// See Documentation/connector/connector.txt in the linux kernel source tree. +package psnotify + +import ( + "bytes" + "encoding/binary" + "os" + "syscall" +) + +const ( + // internal flags (from ) + _CN_IDX_PROC = 0x1 + _CN_VAL_PROC = 0x1 + + // internal flags (from ) + _PROC_CN_MCAST_LISTEN = 1 + _PROC_CN_MCAST_IGNORE = 2 + + // Flags (from ) + PROC_EVENT_FORK = 0x00000001 // fork() events + PROC_EVENT_EXEC = 0x00000002 // exec() events + PROC_EVENT_EXIT = 0x80000000 // exit() events + + // Watch for all process events + PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT +) + +var ( + byteOrder = binary.LittleEndian +) + +// linux/connector.h: struct cb_id +type cbId struct { + Idx uint32 + Val uint32 +} + +// linux/connector.h: struct cb_msg +type cnMsg struct { + Id cbId + Seq uint32 + Ack uint32 + Len uint16 + Flags uint16 +} + +// linux/cn_proc.h: struct proc_event.{what,cpu,timestamp_ns} +type procEventHeader struct { + What uint32 + Cpu uint32 + Timestamp uint64 +} + +// linux/cn_proc.h: struct proc_event.fork +type forkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +// linux/cn_proc.h: struct proc_event.exec +type execProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +// linux/cn_proc.h: 
struct proc_event.exit +type exitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 +} + +// standard netlink header + connector header +type netlinkProcMessage struct { + Header syscall.NlMsghdr + Data cnMsg +} + +type netlinkListener struct { + addr *syscall.SockaddrNetlink // Netlink socket address + sock int // The syscall.Socket() file descriptor + seq uint32 // struct cn_msg.seq +} + +// Initialize linux implementation of the eventListener interface +func createListener() (eventListener, error) { + listener := &netlinkListener{} + err := listener.bind() + return listener, err +} + +// noop on linux +func (w *Watcher) unregister(pid int) error { + return nil +} + +// noop on linux +func (w *Watcher) register(pid int, flags uint32) error { + return nil +} + +// Read events from the netlink socket +func (w *Watcher) readEvents() { + buf := make([]byte, syscall.Getpagesize()) + + listener, _ := w.listener.(*netlinkListener) + + for { + if w.isDone() { + return + } + + nr, _, err := syscall.Recvfrom(listener.sock, buf, 0) + + if err != nil { + w.Error <- err + continue + } + if nr < syscall.NLMSG_HDRLEN { + w.Error <- syscall.EINVAL + continue + } + + msgs, _ := syscall.ParseNetlinkMessage(buf[:nr]) + + for _, m := range msgs { + if m.Header.Type == syscall.NLMSG_DONE { + w.handleEvent(m.Data) + } + } + } +} + +// Internal helper to check if pid && event is being watched +func (w *Watcher) isWatching(pid int, event uint32) bool { + if watch, ok := w.watches[pid]; ok { + return (watch.flags & event) == event + } + return false +} + +// Dispatch events from the netlink socket to the Event channels. 
+// Unlike bsd kqueue, netlink receives events for all pids, +// so we apply filtering based on the watch table via isWatching() +func (w *Watcher) handleEvent(data []byte) { + buf := bytes.NewBuffer(data) + msg := &cnMsg{} + hdr := &procEventHeader{} + + binary.Read(buf, byteOrder, msg) + binary.Read(buf, byteOrder, hdr) + + switch hdr.What { + case PROC_EVENT_FORK: + event := &forkProcEvent{} + binary.Read(buf, byteOrder, event) + ppid := int(event.ParentTgid) + pid := int(event.ChildTgid) + + if w.isWatching(ppid, PROC_EVENT_EXEC) { + // follow forks + watch, _ := w.watches[ppid] + w.Watch(pid, watch.flags) + } + + if w.isWatching(ppid, PROC_EVENT_FORK) { + w.Fork <- &ProcEventFork{ParentPid: ppid, ChildPid: pid} + } + case PROC_EVENT_EXEC: + event := &execProcEvent{} + binary.Read(buf, byteOrder, event) + pid := int(event.ProcessTgid) + + if w.isWatching(pid, PROC_EVENT_EXEC) { + w.Exec <- &ProcEventExec{Pid: pid} + } + case PROC_EVENT_EXIT: + event := &exitProcEvent{} + binary.Read(buf, byteOrder, event) + pid := int(event.ProcessTgid) + + if w.isWatching(pid, PROC_EVENT_EXIT) { + w.RemoveWatch(pid) + w.Exit <- &ProcEventExit{Pid: pid} + } + } +} + +// Bind our netlink socket and +// send a listen control message to the connector driver. +func (listener *netlinkListener) bind() error { + sock, err := syscall.Socket( + syscall.AF_NETLINK, + syscall.SOCK_DGRAM, + syscall.NETLINK_CONNECTOR) + + if err != nil { + return err + } + + listener.sock = sock + listener.addr = &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Groups: _CN_IDX_PROC, + } + + err = syscall.Bind(listener.sock, listener.addr) + + if err != nil { + return err + } + + return listener.send(_PROC_CN_MCAST_LISTEN) +} + +// Send an ignore control message to the connector driver +// and close our netlink socket. 
+func (listener *netlinkListener) close() error { + err := listener.send(_PROC_CN_MCAST_IGNORE) + syscall.Close(listener.sock) + return err +} + +// Generic method for sending control messages to the connector +// driver; where op is one of PROC_CN_MCAST_{LISTEN,IGNORE} +func (listener *netlinkListener) send(op uint32) error { + listener.seq++ + pr := &netlinkProcMessage{} + plen := binary.Size(pr.Data) + binary.Size(op) + pr.Header.Len = syscall.NLMSG_HDRLEN + uint32(plen) + pr.Header.Type = uint16(syscall.NLMSG_DONE) + pr.Header.Flags = 0 + pr.Header.Seq = listener.seq + pr.Header.Pid = uint32(os.Getpid()) + + pr.Data.Id.Idx = _CN_IDX_PROC + pr.Data.Id.Val = _CN_VAL_PROC + + pr.Data.Len = uint16(binary.Size(op)) + + buf := bytes.NewBuffer(make([]byte, 0, pr.Header.Len)) + binary.Write(buf, byteOrder, pr) + binary.Write(buf, byteOrder, op) + + return syscall.Sendto(listener.sock, buf.Bytes(), 0, listener.addr) +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_test.go new file mode 100644 index 000000000..28f38a8d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_test.go @@ -0,0 +1,283 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package psnotify + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "syscall" + "testing" + "time" +) + +type anyEvent struct { + exits []int + forks []int + execs []int + errors []error + done chan bool +} + +type testWatcher struct { + t *testing.T + watcher *Watcher + events *anyEvent +} + +// General purpose Watcher wrapper for all tests +func newTestWatcher(t *testing.T) *testWatcher { + watcher, err := NewWatcher() + if err != nil { + t.Fatal(err) + } + + events := &anyEvent{ + done: make(chan bool, 1), + } + + tw := &testWatcher{ + t: t, + watcher: watcher, + events: events, + } + + go func() { + for { + select { + case <-events.done: + return + case ev := <-watcher.Fork: + events.forks = append(events.forks, ev.ParentPid) + case ev := <-watcher.Exec: + events.execs = append(events.execs, ev.Pid) + case ev := <-watcher.Exit: + events.exits = append(events.exits, ev.Pid) + case err := <-watcher.Error: + events.errors = append(events.errors, err) + } + } + }() + + return tw +} + +func (tw *testWatcher) close() { + pause := 100 * time.Millisecond + time.Sleep(pause) + + tw.events.done <- true + + tw.watcher.Close() + + time.Sleep(pause) +} + +func skipTest(t *testing.T) bool { + if runtime.GOOS == "linux" && os.Getuid() != 0 { + fmt.Println("SKIP: test must be run as root on linux") + return true + } + return false +} + +func startSleepCommand(t *testing.T) *exec.Cmd { + cmd := exec.Command("sh", "-c", "sleep 100") + if err := cmd.Start(); err != nil { + t.Error(err) + } + return cmd +} + +func runCommand(t *testing.T, name string) *exec.Cmd { + cmd := exec.Command(name) + if err := cmd.Run(); err != nil { + t.Error(err) + } + return cmd +} + +func expectEvents(t *testing.T, num int, name string, pids []int) bool { + if len(pids) != num { + t.Errorf("Expected %d %s events, got=%v", num, name, pids) + return false + } + return true +} + +func expectEventPid(t *testing.T, name string, expect int, pid int) bool { + if expect != pid { + t.Errorf("Expected %s 
pid=%d, received=%d", name, expect, pid) + return false + } + return true +} + +func TestWatchFork(t *testing.T) { + if skipTest(t) { + return + } + + pid := os.Getpid() + + tw := newTestWatcher(t) + + // no watches added yet, so this fork event will no be captured + runCommand(t, "date") + + // watch fork events for this process + if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil { + t.Error(err) + } + + // this fork event will be captured, + // the exec and exit events will not be captured + runCommand(t, "cal") + + tw.close() + + if expectEvents(t, 1, "forks", tw.events.forks) { + expectEventPid(t, "fork", pid, tw.events.forks[0]) + } + + expectEvents(t, 0, "execs", tw.events.execs) + expectEvents(t, 0, "exits", tw.events.exits) +} + +func TestWatchExit(t *testing.T) { + if skipTest(t) { + return + } + + tw := newTestWatcher(t) + + cmd := startSleepCommand(t) + + childPid := cmd.Process.Pid + + // watch for exit event of our child process + if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil { + t.Error(err) + } + + // kill our child process, triggers exit event + syscall.Kill(childPid, syscall.SIGTERM) + + cmd.Wait() + + tw.close() + + expectEvents(t, 0, "forks", tw.events.forks) + + expectEvents(t, 0, "execs", tw.events.execs) + + if expectEvents(t, 1, "exits", tw.events.exits) { + expectEventPid(t, "exit", childPid, tw.events.exits[0]) + } +} + +// combined version of TestWatchFork() and TestWatchExit() +func TestWatchForkAndExit(t *testing.T) { + if skipTest(t) { + return + } + + pid := os.Getpid() + + tw := newTestWatcher(t) + + if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil { + t.Error(err) + } + + cmd := startSleepCommand(t) + + childPid := cmd.Process.Pid + + if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil { + t.Error(err) + } + + syscall.Kill(childPid, syscall.SIGTERM) + + cmd.Wait() + + tw.close() + + if expectEvents(t, 1, "forks", tw.events.forks) { + expectEventPid(t, "fork", pid, 
tw.events.forks[0]) + } + + expectEvents(t, 0, "execs", tw.events.execs) + + if expectEvents(t, 1, "exits", tw.events.exits) { + expectEventPid(t, "exit", childPid, tw.events.exits[0]) + } +} + +func TestWatchFollowFork(t *testing.T) { + if skipTest(t) { + return + } + + // Darwin is not able to follow forks, as the kqueue fork event + // does not provide the child pid. + if runtime.GOOS != "linux" { + fmt.Println("SKIP: test follow forks is linux only") + return + } + + pid := os.Getpid() + + tw := newTestWatcher(t) + + // watch for all process events related to this process + if err := tw.watcher.Watch(pid, PROC_EVENT_ALL); err != nil { + t.Error(err) + } + + commands := []string{"date", "cal"} + childPids := make([]int, len(commands)) + + // triggers fork/exec/exit events for each command + for i, name := range commands { + cmd := runCommand(t, name) + childPids[i] = cmd.Process.Pid + } + + // remove watch for this process + tw.watcher.RemoveWatch(pid) + + // run commands again to make sure we don't receive any unwanted events + for _, name := range commands { + runCommand(t, name) + } + + tw.close() + + // run commands again to make sure nothing panics after + // closing the watcher + for _, name := range commands { + runCommand(t, name) + } + + num := len(commands) + if expectEvents(t, num, "forks", tw.events.forks) { + for _, epid := range tw.events.forks { + expectEventPid(t, "fork", pid, epid) + } + } + + if expectEvents(t, num, "execs", tw.events.execs) { + for i, epid := range tw.events.execs { + expectEventPid(t, "exec", childPids[i], epid) + } + } + + if expectEvents(t, num, "exits", tw.events.exits) { + for i, epid := range tw.events.exits { + expectEventPid(t, "exit", childPids[i], epid) + } + } +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_darwin.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_darwin.go new file mode 100644 index 000000000..e3a8c4b9c --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_darwin.go @@ -0,0 +1,467 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "syscall" + "time" + "unsafe" +) + +func (self *LoadAverage) Get() error { + avg := []C.double{0, 0, 0} + + C.getloadavg(&avg[0], C.int(len(avg))) + + self.One = float64(avg[0]) + self.Five = float64(avg[1]) + self.Fifteen = float64(avg[2]) + + return nil +} + +func (self *Uptime) Get() error { + tv := syscall.Timeval32{} + + if err := sysctlbyname("kern.boottime", &tv); err != nil { + return err + } + + self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() + + return nil +} + +func (self *Mem) Get() error { + var vmstat C.vm_statistics_data_t + + if err := sysctlbyname("hw.memsize", &self.Total); err != nil { + return err + } + + if err := vm_info(&vmstat); err != nil { + return err + } + + kern := uint64(vmstat.inactive_count) << 12 + self.Free = uint64(vmstat.free_count) << 12 + + self.Used = self.Total - self.Free + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +type xsw_usage struct { + Total, Avail, Used uint64 +} + +func (self *Swap) Get() error { + sw_usage := xsw_usage{} + + if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { + return err + } + + self.Total = sw_usage.Total + self.Used = sw_usage.Used + self.Free = sw_usage.Avail + + return nil +} + +func (self *Cpu) Get() error { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpuload C.host_cpu_load_info_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics error=%d", status) + } + + self.User = 
uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) + self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) + self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) + self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) + + return nil +} + +func (self *CpuList) Get() error { + var count C.mach_msg_type_number_t + var cpuload *C.processor_cpu_load_info_data_t + var ncpu C.natural_t + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + + return nil +} + +func (self *FileSystemList) Get() error { + num, err := getfsstat(nil, C.MNT_NOWAIT) + if num < 0 { + return err + } + + buf := make([]syscall.Statfs_t, num) + + num, err = getfsstat(buf, C.MNT_NOWAIT) + if err != nil { + return err + } + + fslist := make([]FileSystem, 
0, num) + + for i := 0; i < num; i++ { + fs := FileSystem{} + + fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) + fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) + fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) + + fslist = append(fslist, fs) + } + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) + if n <= 0 { + return syscall.EINVAL + } + buf := make([]byte, n) + n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) + if n <= 0 { + return syscall.ENOMEM + } + + var pid int32 + num := int(n) / binary.Size(pid) + list := make([]int, 0, num) + bbuf := bytes.NewBuffer(buf) + + for i := 0; i < num; i++ { + if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { + return err + } + if pid == 0 { + continue + } + + list = append(list, int(pid)) + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Name = C.GoString(&info.pbsd.pbi_comm[0]) + + switch info.pbsd.pbi_status { + case C.SIDL: + self.State = RunStateIdle + case C.SRUN: + self.State = RunStateRun + case C.SSLEEP: + self.State = RunStateSleep + case C.SSTOP: + self.State = RunStateStop + case C.SZOMB: + self.State = RunStateZombie + default: + self.State = RunStateUnknown + } + + self.Ppid = int(info.pbsd.pbi_ppid) + + self.Tty = int(info.pbsd.e_tdev) + + self.Priority = int(info.ptinfo.pti_priority) + + self.Nice = int(info.pbsd.pbi_nice) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Size = uint64(info.ptinfo.pti_virtual_size) + self.Resident = uint64(info.ptinfo.pti_resident_size) + self.PageFaults = uint64(info.ptinfo.pti_faults) + + return nil +} + +func (self *ProcTime) Get(pid int) error { + info := 
C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.User = + uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) + + self.Sys = + uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) + + self.Total = self.User + self.Sys + + self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + + (uint64(info.pbsd.pbi_start_tvusec) / 1000) + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + var args []string + + argv := func(arg string) { + args = append(args, arg) + } + + err := kern_procargs(pid, nil, argv, nil) + + self.List = args + + return err +} + +func (self *ProcExe) Get(pid int) error { + exe := func(arg string) { + self.Name = arg + } + + return kern_procargs(pid, exe, nil, nil) +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, + exe func(string), + argv func(string), + env func(string, string)) error { + + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + argmax := uintptr(C.ARG_MAX) + buf := make([]byte, argmax) + err := sysctl(mib, &buf[0], &argmax, nil, 0) + if err != nil { + return nil + } + + bbuf := bytes.NewBuffer(buf) + bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX 
copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. 
+func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_format.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_format.go new file mode 100644 index 000000000..d80a64e88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := 
self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface.go new file mode 100644 index 000000000..dd72a76b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type Swap struct { + Total uint64 + Used uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type 
ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface_test.go new file mode 100644 index 000000000..fe26abd1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface_test.go @@ -0,0 +1,135 @@ +package sigar_test + +import ( + "os" + "path/filepath" + "runtime" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . 
"github.com/cloudfoundry/gosigar" +) + +var _ = Describe("Sigar", func() { + var invalidPid = 666666 + + It("cpu", func() { + cpu := Cpu{} + err := cpu.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("load average", func() { + avg := LoadAverage{} + err := avg.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("uptime", func() { + uptime := Uptime{} + err := uptime.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(uptime.Length).To(BeNumerically(">", 0)) + }) + + It("mem", func() { + mem := Mem{} + err := mem.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically(">", 0)) + Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total)) + }) + + It("swap", func() { + swap := Swap{} + err := swap.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total)) + }) + + It("cpu list", func() { + cpulist := CpuList{} + err := cpulist.Get() + Expect(err).ToNot(HaveOccurred()) + + nsigar := len(cpulist.List) + numcpu := runtime.NumCPU() + Expect(nsigar).To(Equal(numcpu)) + }) + + It("file system list", func() { + fslist := FileSystemList{} + err := fslist.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(len(fslist.List)).To(BeNumerically(">", 0)) + }) + + It("file system usage", func() { + fsusage := FileSystemUsage{} + err := fsusage.Get("/") + Expect(err).ToNot(HaveOccurred()) + + err = fsusage.Get("T O T A L L Y B O G U S") + Expect(err).To(HaveOccurred()) + }) + + It("proc list", func() { + pids := ProcList{} + err := pids.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(len(pids.List)).To(BeNumerically(">", 2)) + + err = pids.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("proc state", func() { + state := ProcState{} + err := state.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect([]RunState{RunStateRun, RunStateSleep}).To(ContainElement(state.State)) + Expect([]string{"go", "ginkgo"}).To(ContainElement(state.Name)) + + err = state.Get(invalidPid) + 
Expect(err).To(HaveOccurred()) + }) + + It("proc mem", func() { + mem := ProcMem{} + err := mem.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + err = mem.Get(invalidPid) + Expect(err).To(HaveOccurred()) + }) + + It("proc time", func() { + time := ProcTime{} + err := time.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + err = time.Get(invalidPid) + Expect(err).To(HaveOccurred()) + }) + + It("proc args", func() { + args := ProcArgs{} + err := args.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect(len(args.List)).To(BeNumerically(">=", 2)) + }) + + It("proc exe", func() { + exe := ProcExe{} + err := exe.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect([]string{"go", "ginkgo"}).To(ContainElement(filepath.Base(exe.Name))) + }) +}) diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux.go new file mode 100644 index 000000000..68ffb0f9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + 
capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s + + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := 
strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := 
strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux_test.go new file mode 100644 index 000000000..c5fcdbc9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_linux_test.go @@ -0,0 +1,225 @@ +package sigar_test + +import ( + "io/ioutil" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("sigarLinux", func() { + var procd string + + BeforeEach(func() { + var err error + procd, err = ioutil.TempDir("", "sigarTests") + Expect(err).ToNot(HaveOccurred()) + sigar.Procd = procd + }) + + AfterEach(func() { + sigar.Procd = "/proc" + }) + + Describe("CPU", func() { + var ( + statFile string + cpu sigar.Cpu + ) + + BeforeEach(func() { + statFile = procd + "/stat" + cpu = sigar.Cpu{} + }) + + Describe("Get", func() { + It("gets CPU usage", func() { + statContents := []byte("cpu 25 1 2 3 4 5 6 7") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + err = cpu.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(cpu.User).To(Equal(uint64(25))) + }) + + It("ignores empty lines", func() { + statContents := []byte("cpu ") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + err = cpu.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(cpu.User).To(Equal(uint64(0))) + }) + }) + + Describe("CollectCpuStats", func() { + It("collects CPU usage over time", func() { + statContents := []byte("cpu 25 1 2 3 4 5 6 7") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + concreteSigar := &sigar.ConcreteSigar{} + cpuUsages, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + Expect(<-cpuUsages).To(Equal(sigar.Cpu{ + User: uint64(25), + Nice: uint64(1), + Sys: uint64(2), + Idle: uint64(3), + Wait: uint64(4), + Irq: uint64(5), + SoftIrq: uint64(6), + Stolen: uint64(7), + })) + + statContents = []byte("cpu 30 3 7 10 25 55 36 65") + err = ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + Expect(<-cpuUsages).To(Equal(sigar.Cpu{ + User: uint64(5), + Nice: uint64(2), + Sys: uint64(5), + Idle: uint64(7), + Wait: uint64(21), + Irq: uint64(50), + SoftIrq: uint64(30), + Stolen: uint64(58), + })) + + stop <- struct{}{} + }) 
+ }) + }) + + Describe("Mem", func() { + var meminfoFile string + BeforeEach(func() { + meminfoFile = procd + "/meminfo" + + meminfoContents := ` +MemTotal: 374256 kB +MemFree: 274460 kB +Buffers: 9764 kB +Cached: 38648 kB +SwapCached: 0 kB +Active: 33772 kB +Inactive: 31184 kB +Active(anon): 16572 kB +Inactive(anon): 552 kB +Active(file): 17200 kB +Inactive(file): 30632 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 786428 kB +SwapFree: 786428 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 16564 kB +Mapped: 6612 kB +Shmem: 584 kB +Slab: 19092 kB +SReclaimable: 9128 kB +SUnreclaim: 9964 kB +KernelStack: 672 kB +PageTables: 1864 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 973556 kB +Committed_AS: 55880 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 21428 kB +VmallocChunk: 34359713596 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 59328 kB +DirectMap2M: 333824 kB +` + err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns correct memory info", func() { + mem := sigar.Mem{} + err := mem.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically("==", 374256*1024)) + Expect(mem.Free).To(BeNumerically("==", 274460*1024)) + }) + }) + + Describe("Swap", func() { + var meminfoFile string + BeforeEach(func() { + meminfoFile = procd + "/meminfo" + + meminfoContents := ` +MemTotal: 374256 kB +MemFree: 274460 kB +Buffers: 9764 kB +Cached: 38648 kB +SwapCached: 0 kB +Active: 33772 kB +Inactive: 31184 kB +Active(anon): 16572 kB +Inactive(anon): 552 kB +Active(file): 17200 kB +Inactive(file): 30632 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 786428 kB +SwapFree: 786428 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 16564 kB +Mapped: 6612 kB +Shmem: 584 kB +Slab: 19092 kB +SReclaimable: 9128 kB +SUnreclaim: 9964 kB +KernelStack: 672 kB 
+PageTables: 1864 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 973556 kB +Committed_AS: 55880 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 21428 kB +VmallocChunk: 34359713596 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 59328 kB +DirectMap2M: 333824 kB +` + err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns correct memory info", func() { + swap := sigar.Swap{} + err := swap.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(swap.Total).To(BeNumerically("==", 786428*1024)) + Expect(swap.Free).To(BeNumerically("==", 786428*1024)) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_suite_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_suite_test.go new file mode 100644 index 000000000..44287f631 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_suite_test.go @@ -0,0 +1,13 @@ +package sigar_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestGosigar(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gosigar Suite") +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_unix.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_unix.go new file mode 100644 index 000000000..39f18784b --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_util.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_util.go new file mode 100644 index 000000000..a02df9419 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows.go new file mode 100644 index 000000000..0c779d7c3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" +) + +func init() { +} + +func (self *LoadAverage) Get() error { + return nil +} + +func (self *Uptime) Get() error { + return nil +} + +func (self *Mem) Get() error { + var statex C.MEMORYSTATUSEX + statex.dwLength = C.DWORD(unsafe.Sizeof(statex)) + + succeeded := C.GlobalMemoryStatusEx(&statex) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError)) + } + + self.Total = uint64(statex.ullTotalPhys) + return nil +} + +func (self *Swap) Get() error { + return notImplemented() +} + +func (self *Cpu) Get() error { + return notImplemented() +} + +func (self *CpuList) Get() error { + return notImplemented() +} + +func (self *FileSystemList) Get() error { + return notImplemented() +} + +func (self *ProcList) Get() error { + return notImplemented() +} + +func (self *ProcState) Get(pid int) error { + return notImplemented() +} + +func (self *ProcMem) Get(pid int) error { + return notImplemented() +} + +func (self *ProcTime) Get(pid int) error { + return notImplemented() +} + +func (self *ProcArgs) Get(pid int) error { + return notImplemented() +} + +func (self *ProcExe) Get(pid int) error { + return notImplemented() +} + +func (self *FileSystemUsage) Get(path string) error { + var availableBytes C.ULARGE_INTEGER + var totalBytes C.ULARGE_INTEGER + var totalFreeBytes C.ULARGE_INTEGER + + pathChars := C.CString(path) + defer C.free(unsafe.Pointer(pathChars)) + + succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError)) + } + + self.Total = *(*uint64)(unsafe.Pointer(&totalBytes)) + return nil +} + +func notImplemented() error { + panic("Not Implemented") + return nil +} diff --git 
a/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows_test.go b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows_test.go new file mode 100644 index 000000000..868bdaab8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_windows_test.go @@ -0,0 +1,32 @@ +package sigar_test + +import ( + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("SigarWindows", func() { + Describe("Memory", func() { + It("gets the total memory", func() { + mem := sigar.Mem{} + err := mem.Get() + + Ω(err).ShouldNot(HaveOccurred()) + Ω(mem.Total).Should(BeNumerically(">", 0)) + }) + }) + + Describe("Disk", func() { + It("gets the total disk space", func() { + usage := sigar.FileSystemUsage{} + err := usage.Get(os.TempDir()) + + Ω(err).ShouldNot(HaveOccurred()) + Ω(usage.Total).Should(BeNumerically(">", 0)) + }) + }) +}) diff --git a/Makefile b/Makefile index 5c27c5d95..021420b5a 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test: prepare docker-compose $(GOPATH)/bin/godep go test -v ./... test-short: prepare - $(GOPATH)/bin/godep go test -v -short ./... + $(GOPATH)/bin/godep go test -short ./... 
test-cleanup: docker-compose kill diff --git a/plugins/system/ps.go b/plugins/system/ps.go index 0bf67011b..1f06bafde 100644 --- a/plugins/system/ps.go +++ b/plugins/system/ps.go @@ -10,7 +10,6 @@ import ( "github.com/influxdb/telegraf/plugins/system/ps/cpu" "github.com/influxdb/telegraf/plugins/system/ps/disk" "github.com/influxdb/telegraf/plugins/system/ps/docker" - "github.com/influxdb/telegraf/plugins/system/ps/load" "github.com/influxdb/telegraf/plugins/system/ps/mem" "github.com/influxdb/telegraf/plugins/system/ps/net" ) @@ -24,7 +23,6 @@ type DockerContainerStat struct { } type PS interface { - LoadAvg() (*load.LoadAvgStat, error) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) DiskUsage() ([]*disk.DiskUsageStat, error) NetIO() ([]net.NetIOCountersStat, error) @@ -45,10 +43,6 @@ type systemPS struct { dockerClient *dc.Client } -func (s *systemPS) LoadAvg() (*load.LoadAvgStat, error) { - return load.LoadAvg() -} - func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.CPUTimesStat, error) { var cpuTimes []cpu.CPUTimesStat if perCPU { diff --git a/plugins/system/system.go b/plugins/system/system.go index f3c3bc7e8..5d5c64e97 100644 --- a/plugins/system/system.go +++ b/plugins/system/system.go @@ -1,39 +1,48 @@ package system -import "github.com/influxdb/telegraf/plugins" +import ( + "github.com/cloudfoundry/gosigar" -type SystemStats struct { - ps PS -} + "github.com/influxdb/telegraf/plugins" +) + +type SystemStats struct{} func (_ *SystemStats) Description() string { - return "Read metrics about system load" + return "Read metrics about system load & uptime" } func (_ *SystemStats) SampleConfig() string { return "" } -func (s *SystemStats) add(acc plugins.Accumulator, +func (_ *SystemStats) add(acc plugins.Accumulator, name string, val float64, tags map[string]string) { if val >= 0 { acc.Add(name, val, tags) } } -func (s *SystemStats) Gather(acc plugins.Accumulator) error { - lv, err := s.ps.LoadAvg() - if err != nil { +func (_ *SystemStats) 
Gather(acc plugins.Accumulator) error { + loadavg := sigar.LoadAverage{} + if err := loadavg.Get(); err != nil { return err } - acc.Add("load1", lv.Load1, nil) - acc.Add("load5", lv.Load5, nil) - acc.Add("load15", lv.Load15, nil) + uptime := sigar.Uptime{} + if err := uptime.Get(); err != nil { + return err + } + + acc.Add("load1", loadavg.One, nil) + acc.Add("load5", loadavg.Five, nil) + acc.Add("load15", loadavg.Fifteen, nil) + acc.Add("uptime", uptime.Length, nil) + acc.Add("uptime_format", uptime.Format(), nil) return nil } func init() { plugins.Add("system", func() plugins.Plugin { - return &SystemStats{ps: &systemPS{}} + return &SystemStats{} }) } diff --git a/plugins/system/system_test.go b/plugins/system/system_test.go index 78c13834b..199141c4a 100644 --- a/plugins/system/system_test.go +++ b/plugins/system/system_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdb/telegraf/plugins/system/ps/cpu" "github.com/influxdb/telegraf/plugins/system/ps/disk" - "github.com/influxdb/telegraf/plugins/system/ps/load" "github.com/influxdb/telegraf/plugins/system/ps/mem" "github.com/influxdb/telegraf/plugins/system/ps/net" "github.com/influxdb/telegraf/testutil" @@ -22,14 +21,6 @@ func TestSystemStats_GenerateStats(t *testing.T) { var acc testutil.Accumulator - lv := &load.LoadAvgStat{ - Load1: 0.3, - Load5: 1.5, - Load15: 0.8, - } - - mps.On("LoadAvg").Return(lv, nil) - cts := cpu.CPUTimesStat{ CPU: "cpu0", User: 3.1, @@ -128,15 +119,6 @@ func TestSystemStats_GenerateStats(t *testing.T) { mps.On("SwapStat").Return(sms, nil) - ss := &SystemStats{ps: &mps} - - err := ss.Gather(&acc) - require.NoError(t, err) - - assert.True(t, acc.CheckValue("load1", 0.3)) - assert.True(t, acc.CheckValue("load5", 1.5)) - assert.True(t, acc.CheckValue("load15", 0.8)) - cs := NewCPUStats(&mps) cputags := map[string]string{ @@ -144,7 +126,7 @@ func TestSystemStats_GenerateStats(t *testing.T) { } preCPUPoints := len(acc.Points) - err = cs.Gather(&acc) + err := cs.Gather(&acc) 
require.NoError(t, err) numCPUPoints := len(acc.Points) - preCPUPoints From 9c57c30e57e96af8dae35f7b3cc5b2a61348fe69 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 31 Aug 2015 15:57:52 -0600 Subject: [PATCH 031/125] Redis plugin internal names consistency fix, g -> r --- plugins/redis/redis.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/redis/redis.go b/plugins/redis/redis.go index 013793f1b..8d86bbc26 100644 --- a/plugins/redis/redis.go +++ b/plugins/redis/redis.go @@ -74,12 +74,12 @@ var ErrProtocolError = errors.New("redis protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Redis) Gather(acc plugins.Accumulator) error { - if len(g.Servers) == 0 { +func (r *Redis) Gather(acc plugins.Accumulator) error { + if len(r.Servers) == 0 { url := &url.URL{ Host: ":6379", } - g.gatherServer(url, acc) + r.gatherServer(url, acc) return nil } @@ -87,7 +87,7 @@ func (g *Redis) Gather(acc plugins.Accumulator) error { var outerr error - for _, serv := range g.Servers { + for _, serv := range r.Servers { u, err := url.Parse(serv) if err != nil { return fmt.Errorf("Unable to parse to address '%s': %s", serv, err) @@ -100,7 +100,7 @@ func (g *Redis) Gather(acc plugins.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - outerr = g.gatherServer(u, acc) + outerr = r.gatherServer(u, acc) }(serv) } @@ -111,8 +111,8 @@ func (g *Redis) Gather(acc plugins.Accumulator) error { const defaultPort = "6379" -func (g *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { - if g.c == nil { +func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { + if r.c == nil { _, _, err := net.SplitHostPort(addr.Host) if err != nil { @@ -141,12 +141,12 @@ func (g *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { } } - g.c = c + r.c = c } - g.c.Write([]byte("info\r\n")) + 
r.c.Write([]byte("info\r\n")) - r := bufio.NewReader(g.c) + r := bufio.NewReader(r.c) line, err := r.ReadString('\n') if err != nil { From 4d19fc08605cda4358a6d37dd04f768b2e469df4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 31 Aug 2015 16:06:51 -0600 Subject: [PATCH 032/125] Fixup for g->r change, io.reader was already using 'r' --- plugins/redis/redis.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/redis/redis.go b/plugins/redis/redis.go index 8d86bbc26..4a95772aa 100644 --- a/plugins/redis/redis.go +++ b/plugins/redis/redis.go @@ -129,9 +129,9 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { if set && pwd != "" { c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) - r := bufio.NewReader(c) + rdr := bufio.NewReader(c) - line, err := r.ReadString('\n') + line, err := rdr.ReadString('\n') if err != nil { return err } @@ -146,9 +146,9 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { r.c.Write([]byte("info\r\n")) - r := bufio.NewReader(r.c) + rdr := bufio.NewReader(r.c) - line, err := r.ReadString('\n') + line, err := rdr.ReadString('\n') if err != nil { return err } @@ -169,7 +169,7 @@ func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { var read int for read < sz { - line, err := r.ReadString('\n') + line, err := rdr.ReadString('\n') if err != nil { return err } From b86c6bba4e8fb6d137757ed7cd5084256d30dc1e Mon Sep 17 00:00:00 2001 From: Michael Wood Date: Wed, 2 Sep 2015 09:11:01 +0200 Subject: [PATCH 033/125] README: Say when tagpass/tagdrop are valid from. closes #163 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index df499ae27..f3f14aac3 100644 --- a/README.md +++ b/README.md @@ -87,10 +87,10 @@ There are 5 configuration options that are configurable per plugin: current plugin. 
Each string in the array is tested as a prefix against metric names and if it matches, the metric is emitted. * **drop**: The inverse of pass, if a metric name matches, it is not emitted. -* **tagpass**: tag names and arrays of strings that are used to filter metrics by +* **tagpass**: (added in 0.1.5) tag names and arrays of strings that are used to filter metrics by the current plugin. Each string in the array is tested as an exact match against the tag name, and if it matches the metric is emitted. -* **tagdrop**: The inverse of tagpass. If a tag matches, the metric is not emitted. +* **tagdrop**: (added in 0.1.5) The inverse of tagpass. If a tag matches, the metric is not emitted. This is tested on metrics that have passed the tagpass test. * **interval**: How often to gather this metric. Normal plugins use a single global interval, but if one particular plugin should be run less or more often, @@ -121,7 +121,7 @@ measurements at a 10s interval and will collect totalcpu & percpu data. 
totalcpu = true ``` -Below is how to configure `tagpass` parameters (added in 0.1.4) +Below is how to configure `tagpass` and `tagdrop` parameters (added in 0.1.5) ``` # Don't collect CPU data for cpu6 & cpu7 From 3f63bcde12fa9d1a535a1c36ae4c560863c66843 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Tue, 1 Sep 2015 11:30:56 -0600 Subject: [PATCH 034/125] add additional MySQL metrics --- plugins/mysql/mysql.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index 1bc72ff2a..d53e580c7 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -58,6 +58,10 @@ type mapping struct { } var mappings = []*mapping{ + { + onServer: "Aborted_", + inExport: "aborted_", + }, { onServer: "Bytes_", inExport: "bytes_", @@ -66,6 +70,10 @@ var mappings = []*mapping{ onServer: "Com_", inExport: "commands_", }, + { + onServer: "Created_", + inExport: "created_", + }, { onServer: "Handler_", inExport: "handler_", @@ -74,6 +82,26 @@ var mappings = []*mapping{ onServer: "Innodb_", inExport: "innodb_", }, + { + onServer: "Key_", + inExport: "key_", + }, + { + onServer: "Open_", + inExport: "open_", + }, + { + onServer: "Opened_", + inExport: "opened_", + }, + { + onServer: "Qcache_", + inExport: "qcache_", + }, + { + onServer: "Table_", + inExport: "table_", + }, { onServer: "Tokudb_", inExport: "tokudb_", From 0143a4227e1a05e388b6d6cc70a8c1baef7d2604 Mon Sep 17 00:00:00 2001 From: nickscript0 Date: Wed, 2 Sep 2015 10:03:05 -0600 Subject: [PATCH 035/125] add additional metrics to mysql plugin tests Closes #165 --- CHANGELOG.md | 6 +++--- plugins/mysql/mysql_test.go | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 940df2cab..c7ef051cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,9 @@ ## v0.1.8 [unreleased] ### Features - -[#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin 
-[#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 +- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin +- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 +- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 ### Bugfixes diff --git a/plugins/mysql/mysql_test.go b/plugins/mysql/mysql_test.go index b4c29146e..8a284ca02 100644 --- a/plugins/mysql/mysql_test.go +++ b/plugins/mysql/mysql_test.go @@ -33,6 +33,13 @@ func TestMysqlGeneratesMetrics(t *testing.T) { {"bytes", 2}, {"innodb", 51}, {"threads", 4}, + {"aborted", 2}, + {"created", 3}, + {"key", 7}, + {"open", 7}, + {"opened", 3}, + {"qcache", 8}, + {"table", 5}, } intMetrics := []string{ From 13061d1ec7a1ddbe5dc20da82d412fce96d410b4 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Sep 2015 10:11:07 -0600 Subject: [PATCH 036/125] package.sh: upload raw binaries to S3 Closes #166 --- CHANGELOG.md | 1 + package.sh | 25 +++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7ef051cb..ad1f05e79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 - [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. 
Thanks @nickscript0 +- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3 ### Bugfixes diff --git a/package.sh b/package.sh index b36e482e2..da87b6bbf 100755 --- a/package.sh +++ b/package.sh @@ -260,7 +260,9 @@ else debian_package=telegraf_${VERSION}_amd64.deb fi -COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ." +COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE \ + --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH \ + --name telegraf --version $VERSION --config-files $CONFIG_ROOT_DIR ." $rpm_args fpm -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS if [ $? -ne 0 ]; then echo "Failed to create RPM package -- aborting." @@ -289,16 +291,35 @@ if [ "$CIRCLE_BRANCH" == "" ]; then cleanup_exit 1 fi + # Upload .deb and .rpm packages for filepath in `ls *.{deb,rpm}`; do echo "Uploading $filepath to S3" filename=`basename $filepath` echo "Uploading $filename to s3://get.influxdb.org/telegraf/$filename" - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://get.influxdb.org/telegraf/$filename --acl public-read --region us-east-1 + AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath \ + s3://get.influxdb.org/telegraf/$filename \ + --acl public-read --region us-east-1 if [ $? -ne 0 ]; then echo "Upload failed -- aborting". cleanup_exit 1 fi done + + # Upload binaries + for b in ${BINS[*]}; do + bin = $GOPATH_INSTALL/bin/$b + zippedbin = $b_$VERSION_linux_x86_64.tar.gz + # Zip the binary + tar -zcf $TMP_WORK_DIR/$zippedbin $bin + echo "Uploading binary: $zippedbin to S3" + AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \ + s3://get.influxdb.org/telegraf/$zippedbin \ + --acl public-read --region us-east-1 + if [ $? -ne 0 ]; then + echo "Binary upload failed -- aborting". 
+ cleanup_exit 1 + fi + done else echo "Not publishing packages to S3." fi From 5bfb6df0e09f0f238a74dd81778b7c23e164ff4e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Sep 2015 10:30:44 -0600 Subject: [PATCH 037/125] Write data in UTC by default and use 's' precision Closes #159 Closes #162 --- CHANGELOG.md | 5 +++++ agent.go | 31 ++++++++++++++++++++++++++++--- cmd/telegraf/telegraf.go | 5 +++-- config.go | 27 ++++++++++++++++++++------- 4 files changed, 56 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad1f05e79..575a3c35d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,14 @@ ## v0.1.8 [unreleased] +### Release Notes +Telegraf will now write data in UTC at second precision by default + ### Features - [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 +- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes - [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0 +- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option - [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3 ### Bugfixes diff --git a/agent.go b/agent.go index e54b7e863..2bfa0cf8f 100644 --- a/agent.go +++ b/agent.go @@ -31,7 +31,14 @@ type Agent struct { // Interval at which to gather information Interval Duration - // Run in debug mode? 
+ // Option for outputting data in UTC + UTC bool `toml:"utc"` + + // Precision to write data at + // Valid values for Precision are n, u, ms, s, m, and h + Precision string + + // Option for running in debug mode Debug bool Hostname string @@ -43,8 +50,14 @@ type Agent struct { // NewAgent returns an Agent struct based off the given Config func NewAgent(config *Config) (*Agent, error) { - agent := &Agent{Config: config, Interval: Duration{10 * time.Second}} + agent := &Agent{ + Config: config, + Interval: Duration{10 * time.Second}, + UTC: true, + Precision: "s", + } + // Apply the toml table to the agent config, overriding defaults err := config.ApplyAgent(agent) if err != nil { return nil, err @@ -199,7 +212,11 @@ func (a *Agent) crankParallel() error { var bp BatchPoints bp.Time = time.Now() + if a.UTC { + bp.Time = bp.Time.UTC() + } bp.Tags = a.Config.Tags + bp.Precision = a.Precision for sub := range points { bp.Points = append(bp.Points, sub.Points...) @@ -223,8 +240,12 @@ func (a *Agent) crank() error { } } - bp.Time = time.Now() bp.Tags = a.Config.Tags + bp.Time = time.Now() + if a.UTC { + bp.Time = bp.Time.UTC() + } + bp.Precision = a.Precision return a.flush(&bp) } @@ -250,6 +271,10 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err bp.Tags = a.Config.Tags bp.Time = time.Now() + if a.UTC { + bp.Time = bp.Time.UTC() + } + bp.Precision = a.Precision if err := a.flush(&bp); err != nil { outerr = errors.New("Error encountered processing plugins & outputs") diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index c7f863778..f28a81bd4 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -126,8 +126,9 @@ func main() { log.Printf("Loaded plugins: %s", strings.Join(plugins, " ")) if ag.Debug { log.Printf("Debug: enabled") - log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v\n", - ag.Interval, ag.Debug, ag.Hostname) + log.Printf("Agent Config: Interval:%s, Debug:%#v, Hostname:%#v, "+ + 
"Precision:%#v, UTC: %#v\n", + ag.Interval, ag.Debug, ag.Hostname, ag.Precision, ag.UTC) } log.Printf("Tags enabled: %s", config.ListTags()) diff --git a/config.go b/config.go index 19ebc00bf..3c97f1346 100644 --- a/config.go +++ b/config.go @@ -131,10 +131,11 @@ func (c *Config) ApplyOutput(name string, v interface{}) error { return nil } -// ApplyAgent loads the toml config into the given interface -func (c *Config) ApplyAgent(v interface{}) error { +// ApplyAgent loads the toml config into the given Agent object, overriding +// defaults (such as collection duration) with the values from the toml config. +func (c *Config) ApplyAgent(a *Agent) error { if c.agent != nil { - return toml.UnmarshalTable(c.agent, v) + return toml.UnmarshalTable(c.agent, a) } return nil @@ -350,11 +351,23 @@ var header = `# Telegraf configuration [tags] # dc = "us-east-1" -# Configuration for telegraf itself +# Configuration for telegraf agent [agent] - # interval = "10s" - # debug = false - # hostname = "prod3241" + # Default data collection interval for all plugins + interval = "10s" + + # If utc = false, uses local time (utc is highly recommended) + utc = true + + # Precision of writes, valid values are n, u, ms, s, m, and h + # note: using second precision greatly helps InfluxDB compression + precision = "s" + + # run telegraf in debug mode + debug = false + + # Override default hostname, if empty use os.Hostname() + hostname = "" ############################################################################### From 65b33a848e6a9f9677ba48222d27b328289e2a80 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Sep 2015 14:08:54 -0600 Subject: [PATCH 038/125] Fix default installed config for consistency --- etc/config.sample.toml | 56 +++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/etc/config.sample.toml b/etc/config.sample.toml index 1b7263e4c..5307b6df8 100644 --- a/etc/config.sample.toml +++ b/etc/config.sample.toml @@ 
-22,55 +22,55 @@ # NOTE: The configuration has a few required parameters. They are marked # with 'required'. Be sure to edit those to make this configuration work. +[tags] + # dc = "us-east-1" + +# Configuration for telegraf itself +[agent] + interval = "10s" + debug = false + hostname = "" + utc = true + precision = "s" + # Configuration for influxdb server to send metrics to [outputs] [outputs.influxdb] -# The full HTTP endpoint URL for your InfluxDB instance -url = "http://localhost:8086" # required. + # The full HTTP endpoint URL for your InfluxDB instance + url = "http://localhost:8086" # required. -# The target database for metrics. This database must already exist -database = "telegraf" # required. + # The target database for metrics. This database must already exist + database = "telegraf" # required. -# username = "telegraf" -# password = "metricsmetricsmetricsmetrics" + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" -# Set the user agent for the POSTs (can be useful for log differentiation) -# user_agent = "telegraf" - -# Tags can also be specified via a normal map, but only one form at a time: - -# [influxdb.tags] -# tags = { "dc" = "us-east-1" } - -# Configuration for telegraf itself -# [agent] -# interval = "10s" -# debug = false -# hostname = "prod3241" + # Set the user agent for the POSTs (can be useful for log differentiation) + # user_agent = "telegraf" # PLUGINS # Read metrics about cpu usage [cpu] -# Whether to report per-cpu stats or not -percpu = true -# # Whether to report total system cpu stats or not -totalcpu = true + # Whether to report per-cpu stats or not + percpu = true + # Whether to report total system cpu stats or not + totalcpu = true # Read metrics about disk usage by mount point [disk] - # no configuration + # no configuration # Read metrics about disk IO by device [io] - # no configuration + # no configuration # Read metrics about memory usage [mem] - # no configuration + # no configuration [system] - # no 
configuration + # no configuration [swap] - # no configuration + # no configuration From 68e41f130cc6edcc0bc8a16851f87cdffacd1bbe Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 2 Sep 2015 17:16:52 -0600 Subject: [PATCH 039/125] Ping plugin Closes #167 --- CHANGELOG.md | 4 +- config.go | 4 +- outputs/kafka/kafka.go | 8 +- plugins/all/all.go | 1 + plugins/ping/ping.go | 177 +++++++++++++++++++++++++++++++ plugins/ping/ping_test.go | 218 ++++++++++++++++++++++++++++++++++++++ testutil/accumulator.go | 11 ++ 7 files changed, 416 insertions(+), 7 deletions(-) create mode 100644 plugins/ping/ping.go create mode 100644 plugins/ping/ping_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 575a3c35d..26e307908 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,8 @@ ## v0.1.8 [unreleased] ### Release Notes -Telegraf will now write data in UTC at second precision by default +- Telegraf will now write data in UTC at second precision by default +- Now using Go 1.5 to build telegraf ### Features - [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin @@ -10,6 +11,7 @@ Telegraf will now write data in UTC at second precision by default - [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. 
Thanks @nickscript0 - [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option - [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3 +- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin ### Bugfixes diff --git a/config.go b/config.go index 3c97f1346..fae445e8c 100644 --- a/config.go +++ b/config.go @@ -439,8 +439,8 @@ func PrintSampleConfig() { func PrintPluginConfig(name string) error { if creator, ok := plugins.Plugins[name]; ok { plugin := creator() - fmt.Printf("# %s\n[%s]\n", plugin.Description(), name) - fmt.Printf(strings.TrimSpace(plugin.SampleConfig())) + fmt.Printf("# %s\n[%s]", plugin.Description(), name) + fmt.Printf(plugin.SampleConfig()) } else { return errors.New(fmt.Sprintf("Plugin %s not found", name)) } diff --git a/outputs/kafka/kafka.go b/outputs/kafka/kafka.go index ac4d61164..49a729b42 100644 --- a/outputs/kafka/kafka.go +++ b/outputs/kafka/kafka.go @@ -19,10 +19,10 @@ type Kafka struct { } var sampleConfig = ` - # URLs of kafka brokers - brokers = ["localhost:9092"] - # Kafka topic for producer messages - topic = "telegraf" + # URLs of kafka brokers + brokers = ["localhost:9092"] + # Kafka topic for producer messages + topic = "telegraf" ` func (k *Kafka) Connect() error { diff --git a/plugins/all/all.go b/plugins/all/all.go index 9400a77a1..ffef12b33 100644 --- a/plugins/all/all.go +++ b/plugins/all/all.go @@ -14,6 +14,7 @@ import ( _ "github.com/influxdb/telegraf/plugins/mongodb" _ "github.com/influxdb/telegraf/plugins/mysql" _ "github.com/influxdb/telegraf/plugins/nginx" + _ "github.com/influxdb/telegraf/plugins/ping" _ "github.com/influxdb/telegraf/plugins/postgresql" _ "github.com/influxdb/telegraf/plugins/prometheus" _ "github.com/influxdb/telegraf/plugins/rabbitmq" diff --git a/plugins/ping/ping.go b/plugins/ping/ping.go new file mode 100644 index 000000000..fd26b8a8b --- /dev/null +++ b/plugins/ping/ping.go @@ -0,0 +1,177 @@ +package ping + +import ( + 
"errors" + "os/exec" + "strconv" + "strings" + "sync" + + "github.com/influxdb/telegraf/plugins" +) + +// HostPinger is a function that runs the "ping" function using a list of +// passed arguments. This can be easily switched with a mocked ping function +// for unit test purposes (see ping_test.go) +type HostPinger func(args ...string) (string, error) + +type Ping struct { + // Interval at which to ping (ping -i ) + PingInterval float64 `toml:"ping_interval"` + + // Number of pings to send (ping -c ) + Count int + + // Ping timeout, in seconds. 0 means no timeout (ping -t ) + Timeout float64 + + // Interface to send ping from (ping -I ) + Interface string + + // URLs to ping + Urls []string + + // host ping function + pingHost HostPinger +} + +func (_ *Ping) Description() string { + return "Ping given url(s) and return statistics" +} + +var sampleConfig = ` + # urls to ping + urls = ["www.google.com"] # required + # number of pings to send (ping -c ) + count = 1 # required + # interval, in s, at which to ping. 0 == default (ping -i ) + ping_interval = 0.0 + # ping timeout, in s. 0 == no timeout (ping -t ) + timeout = 0.0 + # interface to send ping from (ping -I ) + interface = "" +` + +func (_ *Ping) SampleConfig() string { + return sampleConfig +} + +func (p *Ping) Gather(acc plugins.Accumulator) error { + + var wg sync.WaitGroup + errorChannel := make(chan error, len(p.Urls)*2) + + // Spin off a go routine for each url to ping + for _, url := range p.Urls { + wg.Add(1) + go func(url string, acc plugins.Accumulator) { + defer wg.Done() + args := p.args(url) + out, err := p.pingHost(args...) 
+ if err != nil { + // Combine go err + stderr output + errorChannel <- errors.New( + strings.TrimSpace(out) + ", " + err.Error()) + } + tags := map[string]string{"url": url} + trans, rec, avg, err := processPingOutput(out) + if err != nil { + // fatal error + errorChannel <- err + return + } + // Calculate packet loss percentage + loss := float64(trans-rec) / float64(trans) * 100.0 + acc.Add("packets_transmitted", trans, tags) + acc.Add("packets_received", rec, tags) + acc.Add("percent_packet_loss", loss, tags) + acc.Add("average_response_ms", avg, tags) + }(url, acc) + } + + wg.Wait() + close(errorChannel) + + // Get all errors and return them as one giant error + errorStrings := []string{} + for err := range errorChannel { + errorStrings = append(errorStrings, err.Error()) + } + + if len(errorStrings) == 0 { + return nil + } + return errors.New(strings.Join(errorStrings, "\n")) +} + +func hostPinger(args ...string) (string, error) { + c := exec.Command("ping", args...) + out, err := c.CombinedOutput() + return string(out), err +} + +// args returns the arguments for the 'ping' executable +func (p *Ping) args(url string) []string { + // Build the ping command args based on toml config + args := []string{"-c", strconv.Itoa(p.Count)} + if p.PingInterval > 0 { + args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', 1, 64)) + } + if p.Timeout > 0 { + args = append(args, "-t", strconv.FormatFloat(p.Timeout, 'f', 1, 64)) + } + if p.Interface != "" { + args = append(args, "-I", p.Interface) + } + args = append(args, url) + return args +} + +// processPingOutput takes in a string output from the ping command, like: +// +// PING www.google.com (173.194.115.84): 56 data bytes +// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms +// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms +// +// --- www.google.com ping statistics --- +// 2 packets transmitted, 2 packets received, 0.0% packet loss +// round-trip min/avg/max/stddev = 
34.843/43.508/52.172/8.664 ms +// +// It returns (, , ) +func processPingOutput(out string) (int, int, float64, error) { + var trans, recv int + var avg float64 + // Set this error to nil if we find a 'transmitted' line + err := errors.New("Fatal error processing ping output") + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, "transmitted") && + strings.Contains(line, "received") { + err = nil + stats := strings.Split(line, ", ") + // Transmitted packets + trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) + if err != nil { + return trans, recv, avg, err + } + // Received packets + recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0]) + if err != nil { + return trans, recv, avg, err + } + } else if strings.Contains(line, "min/avg/max") { + stats := strings.Split(line, " = ")[1] + avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64) + if err != nil { + return trans, recv, avg, err + } + } + } + return trans, recv, avg, err +} + +func init() { + plugins.Add("ping", func() plugins.Plugin { + return &Ping{pingHost: hostPinger} + }) +} diff --git a/plugins/ping/ping_test.go b/plugins/ping/ping_test.go new file mode 100644 index 000000000..5fed0b6c8 --- /dev/null +++ b/plugins/ping/ping_test.go @@ -0,0 +1,218 @@ +package ping + +import ( + "errors" + "reflect" + "sort" + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +// BSD/Darwin ping output +var bsdPingOutput = ` +PING www.google.com (216.58.217.36): 56 data bytes +64 bytes from 216.58.217.36: icmp_seq=0 ttl=55 time=15.087 ms +64 bytes from 216.58.217.36: icmp_seq=1 ttl=55 time=21.564 ms +64 bytes from 216.58.217.36: icmp_seq=2 ttl=55 time=27.263 ms +64 bytes from 216.58.217.36: icmp_seq=3 ttl=55 time=18.828 ms +64 bytes from 216.58.217.36: icmp_seq=4 ttl=55 time=18.378 ms + +--- www.google.com ping statistics --- +5 packets transmitted, 5 packets received, 0.0% packet loss +round-trip 
min/avg/max/stddev = 15.087/20.224/27.263/4.076 ms +` + +// Linux ping output +var linuxPingOutput = ` +PING www.google.com (216.58.218.164) 56(84) bytes of data. +64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms +64 bytes from host.net (216.58.218.164): icmp_seq=2 ttl=63 time=42.3 ms +64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=63 time=45.1 ms +64 bytes from host.net (216.58.218.164): icmp_seq=4 ttl=63 time=43.5 ms +64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=63 time=51.8 ms + +--- www.google.com ping statistics --- +5 packets transmitted, 5 received, 0% packet loss, time 4010ms +rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms +` + +// Fatal ping output (invalid argument) +var fatalPingOutput = ` +ping: -i interval too short: Operation not permitted +` + +// Test that ping command output is processed properly +func TestProcessPingOutput(t *testing.T) { + trans, rec, avg, err := processPingOutput(bsdPingOutput) + assert.NoError(t, err) + assert.Equal(t, 5, trans, "5 packets were transmitted") + assert.Equal(t, 5, rec, "5 packets were transmitted") + assert.InDelta(t, 20.224, avg, 0.001) + + trans, rec, avg, err = processPingOutput(linuxPingOutput) + assert.NoError(t, err) + assert.Equal(t, 5, trans, "5 packets were transmitted") + assert.Equal(t, 5, rec, "5 packets were transmitted") + assert.InDelta(t, 43.628, avg, 0.001) +} + +// Test that processPingOutput returns an error when 'ping' fails to run, such +// as when an invalid argument is provided +func TestErrorProcessPingOutput(t *testing.T) { + _, _, _, err := processPingOutput(fatalPingOutput) + assert.Error(t, err, "Error was expected from processPingOutput") +} + +// Test that arg lists and created correctly +func TestArgs(t *testing.T) { + p := Ping{ + Count: 2, + } + + // Actual and Expected arg lists must be sorted for reflect.DeepEqual + + actual := p.args("www.google.com") + expected := []string{"-c", "2", "www.google.com"} + sort.Strings(actual) + 
sort.Strings(expected) + assert.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) + + p.Interface = "eth0" + actual = p.args("www.google.com") + expected = []string{"-c", "2", "-I", "eth0", "www.google.com"} + sort.Strings(actual) + sort.Strings(expected) + assert.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) + + p.Timeout = 12.0 + actual = p.args("www.google.com") + expected = []string{"-c", "2", "-I", "eth0", "-t", "12.0", "www.google.com"} + sort.Strings(actual) + sort.Strings(expected) + assert.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) + + p.PingInterval = 1.2 + actual = p.args("www.google.com") + expected = []string{"-c", "2", "-I", "eth0", "-t", "12.0", "-i", "1.2", + "www.google.com"} + sort.Strings(actual) + sort.Strings(expected) + assert.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) +} + +func mockHostPinger(args ...string) (string, error) { + return linuxPingOutput, nil +} + +// Test that Gather function works on a normal ping +func TestPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com", "www.reddit.com"}, + pingHost: mockHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.google.com"} + assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) + assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) + assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) + assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", + 43.628, tags)) + + tags = map[string]string{"url": "www.reddit.com"} + assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) + assert.NoError(t, acc.ValidateTaggedValue("packets_received", 5, tags)) + assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 0.0, tags)) + assert.NoError(t, 
acc.ValidateTaggedValue("average_response_ms", + 43.628, tags)) +} + +var lossyPingOutput = ` +PING www.google.com (216.58.218.164) 56(84) bytes of data. +64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms +64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=63 time=45.1 ms +64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=63 time=51.8 ms + +--- www.google.com ping statistics --- +5 packets transmitted, 3 received, 40% packet loss, time 4010ms +rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms +` + +func mockLossyHostPinger(args ...string) (string, error) { + return lossyPingOutput, nil +} + +// Test that Gather works on a ping with lossy packets +func TestLossyPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + pingHost: mockLossyHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.google.com"} + assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 5, tags)) + assert.NoError(t, acc.ValidateTaggedValue("packets_received", 3, tags)) + assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 40.0, tags)) + assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 44.033, tags)) +} + +var errorPingOutput = ` +PING www.amazon.com (176.32.98.166): 56 data bytes +Request timeout for icmp_seq 0 + +--- www.amazon.com ping statistics --- +2 packets transmitted, 0 packets received, 100.0% packet loss +` + +func mockErrorHostPinger(args ...string) (string, error) { + return errorPingOutput, errors.New("No packets received") +} + +// Test that Gather works on a ping with no transmitted packets, even though the +// command returns an error +func TestBadPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.amazon.com"}, + pingHost: mockErrorHostPinger, + } + + p.Gather(&acc) + tags := map[string]string{"url": "www.amazon.com"} + assert.NoError(t, acc.ValidateTaggedValue("packets_transmitted", 2, tags)) 
+ assert.NoError(t, acc.ValidateTaggedValue("packets_received", 0, tags)) + assert.NoError(t, acc.ValidateTaggedValue("percent_packet_loss", 100.0, tags)) + assert.NoError(t, acc.ValidateTaggedValue("average_response_ms", 0.0, tags)) +} + +func mockFatalHostPinger(args ...string) (string, error) { + return fatalPingOutput, errors.New("So very bad") +} + +// Test that a fatal ping command does not gather any statistics. +func TestFatalPingGather(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.amazon.com"}, + pingHost: mockFatalHostPinger, + } + + p.Gather(&acc) + assert.False(t, acc.HasMeasurement("packets_transmitted"), + "Fatal ping should not have packet measurements") + assert.False(t, acc.HasMeasurement("packets_received"), + "Fatal ping should not have packet measurements") + assert.False(t, acc.HasMeasurement("percent_packet_loss"), + "Fatal ping should not have packet measurements") + assert.False(t, acc.HasMeasurement("average_response_ms"), + "Fatal ping should not have packet measurements") +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index db3a67e66..3d9d40827 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -151,3 +151,14 @@ func (a *Accumulator) HasFloatValue(measurement string) bool { return false } + +// HasMeasurement returns true if the accumulator has a measurement with the +// given name +func (a *Accumulator) HasMeasurement(measurement string) bool { + for _, p := range a.Points { + if p.Measurement == measurement { + return true + } + } + return false +} From 6d42973d7c2e160f2952ae2aab9ce49d79049031 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 4 Sep 2015 12:53:23 -0600 Subject: [PATCH 040/125] Update package script and readme for 0.1.8 --- README.md | 4 ++-- package.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f3f14aac3..c48549889 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ are some 
InfluxDB compatibility requirements: * InfluxDB 0.9.2 and prior requires Telegraf 0.1.4 Latest: -* http://get.influxdb.org/telegraf/telegraf_0.1.7_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.1.7-1.x86_64.rpm +* http://get.influxdb.org/telegraf/telegraf_0.1.8_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.1.8-1.x86_64.rpm 0.1.4: * http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb diff --git a/package.sh b/package.sh index da87b6bbf..1a991b111 100755 --- a/package.sh +++ b/package.sh @@ -307,8 +307,8 @@ if [ "$CIRCLE_BRANCH" == "" ]; then # Upload binaries for b in ${BINS[*]}; do - bin = $GOPATH_INSTALL/bin/$b - zippedbin = $b_$VERSION_linux_x86_64.tar.gz + bin=$GOPATH_INSTALL/bin/$b + zippedbin=$b_$VERSION_linux_x86_64.tar.gz # Zip the binary tar -zcf $TMP_WORK_DIR/$zippedbin $bin echo "Uploading binary: $zippedbin to S3" From e47801074e4e3ecfbcec5225e650c1250525b10c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 4 Sep 2015 13:19:13 -0600 Subject: [PATCH 041/125] package.sh script fixes for uploading binaries --- package.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/package.sh b/package.sh index 1a991b111..facd09c3b 100755 --- a/package.sh +++ b/package.sh @@ -307,10 +307,9 @@ if [ "$CIRCLE_BRANCH" == "" ]; then # Upload binaries for b in ${BINS[*]}; do - bin=$GOPATH_INSTALL/bin/$b - zippedbin=$b_$VERSION_linux_x86_64.tar.gz + zippedbin=${b}_${VERSION}_linux_x86_64.tar.gz # Zip the binary - tar -zcf $TMP_WORK_DIR/$zippedbin $bin + tar -zcf $TMP_WORK_DIR/$zippedbin -C $GOPATH_INSTALL/bin ./$b echo "Uploading binary: $zippedbin to S3" AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \ s3://get.influxdb.org/telegraf/$zippedbin \ From 6260dd101857692cbcadfadd26d00e70ccdcb311 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 4 Sep 2015 14:12:50 -0600 Subject: [PATCH 042/125] Makefile rule for building all linux binaries, and upload all ARCHs --- Makefile | 11 +++++++++++ README.md | 3 
+++ package.sh | 10 ++++++---- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 021420b5a..89db1d5b0 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,17 @@ build: prepare "-X main.Version $(VERSION)" \ ./cmd/telegraf/telegraf.go +build-linux-bins: prepare + GOARCH=amd64 GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_amd64 \ + -ldflags "-X main.Version $(VERSION)" \ + ./cmd/telegraf/telegraf.go + GOARCH=386 GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_386 \ + -ldflags "-X main.Version $(VERSION)" \ + ./cmd/telegraf/telegraf.go + GOARCH=arm GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_arm \ + -ldflags "-X main.Version $(VERSION)" \ + ./cmd/telegraf/telegraf.go + prepare: go get github.com/tools/godep diff --git a/README.md b/README.md index c48549889..ff7d772d6 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,9 @@ Latest: * http://get.influxdb.org/telegraf/telegraf_0.1.8_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.1.8-1.x86_64.rpm +Binaries: +* http://get.influxdb.org/telegraf/telegraf_0.1.8_linux_x86_64.tar.gz + 0.1.4: * http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.1.4-1.x86_64.rpm diff --git a/package.sh b/package.sh index facd09c3b..cea7cdf26 100755 --- a/package.sh +++ b/package.sh @@ -303,13 +303,15 @@ if [ "$CIRCLE_BRANCH" == "" ]; then echo "Upload failed -- aborting". 
cleanup_exit 1 fi + rm $filepath done - # Upload binaries - for b in ${BINS[*]}; do - zippedbin=${b}_${VERSION}_linux_x86_64.tar.gz + # Make and upload linux amd64, 386, and arm + make build-linux-bins + for b in `ls telegraf_*`; do + zippedbin=${b}_${VERSION}.tar.gz # Zip the binary - tar -zcf $TMP_WORK_DIR/$zippedbin -C $GOPATH_INSTALL/bin ./$b + tar -zcf $TMP_WORK_DIR/$zippedbin ./$b echo "Uploading binary: $zippedbin to S3" AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $TMP_WORK_DIR/$zippedbin \ s3://get.influxdb.org/telegraf/$zippedbin \ From b3044a6e2bbb4650561e386dd7291ad3779afd5c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 4 Sep 2015 16:37:07 -0600 Subject: [PATCH 043/125] Put all ARCH binaries on the README --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ff7d772d6..271dc0e62 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,9 @@ Latest: * http://get.influxdb.org/telegraf/telegraf-0.1.8-1.x86_64.rpm Binaries: -* http://get.influxdb.org/telegraf/telegraf_0.1.8_linux_x86_64.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.1.8.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_386_0.1.8.tar.gz +* http://get.influxdb.org/telegraf/telegraf_linux_arm_0.1.8.tar.gz 0.1.4: * http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb From 8c5e1ff0a0c52046aa8b35cda9474136253a79ea Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 4 Sep 2015 17:05:50 -0600 Subject: [PATCH 044/125] Update README plugins list --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 271dc0e62..a5c7c921d 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,7 @@ Below is how to configure `tagpass` and `tagdrop` parameters (added in 0.1.5) Telegraf currently has support for collecting metrics from +* apache * disque * elasticsearch * exec (generic JSON-emitting executable plugin) @@ -161,12 +162,19 @@ Telegraf currently has support for 
collecting metrics from * mongodb * mysql * nginx +* ping * postgresql * prometheus * rabbitmq * redis * rethinkdb -* system (mem, CPU, load, etc.) +* system + * cpu + * mem + * io + * net + * disk + * swap We'll be adding support for many more over the coming months. Read on if you want to add support for another service or third-party API. From bf9992b6136526d4f82b5851275be4ccd8c1cb34 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 8 Sep 2015 15:08:40 -0600 Subject: [PATCH 045/125] Update telegraf.service and packaging script for systemd Deals with most of #170 --- package.sh | 51 ++++++++++++++++++++++++++++------------ scripts/telegraf.service | 2 ++ 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/package.sh b/package.sh index cea7cdf26..fd7055be7 100755 --- a/package.sh +++ b/package.sh @@ -39,6 +39,7 @@ LOGROTATE_DIR=/etc/logrotate.d SAMPLE_CONFIGURATION=etc/config.sample.toml LOGROTATE_CONFIGURATION=etc/logrotate.d/telegraf INITD_SCRIPT=scripts/init.sh +SYSTEMD_SCRIPT=scripts/telegraf.service TMP_WORK_DIR=`mktemp -d` POST_INSTALL_PATH=`mktemp` @@ -156,27 +157,41 @@ generate_postinstall_script() { cat <$POST_INSTALL_PATH rm -f $INSTALL_ROOT_DIR/telegraf rm -f $INSTALL_ROOT_DIR/init.sh -ln -s $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf -ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh - -rm -f /etc/init.d/telegraf -ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf -chmod +x /etc/init.d/telegraf -if which update-rc.d > /dev/null 2>&1 ; then - update-rc.d -f telegraf remove - update-rc.d telegraf defaults -else - chkconfig --add telegraf -fi +ln -sfn $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf if ! 
id telegraf >/dev/null 2>&1; then useradd --system -U -M telegraf fi + +# Systemd +if which systemctl > /dev/null 2>&1 ; then + cp $INSTALL_ROOT_DIR/versions/$version/scripts/telegraf.service \ + /lib/systemd/system/telegraf.service + systemctl enable telegraf + +# Sysv +else + ln -sfn $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh \ + $INSTALL_ROOT_DIR/init.sh + rm -f /etc/init.d/telegraf + ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/telegraf + chmod +x /etc/init.d/telegraf + # update-rc.d sysv service: + if which update-rc.d > /dev/null 2>&1 ; then + update-rc.d -f telegraf remove + update-rc.d telegraf defaults + # CentOS-style sysv: + else + chkconfig --add telegraf + fi + + mkdir -p $TELEGRAF_LOG_DIR + chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR +fi + chown -R -L telegraf:telegraf $INSTALL_ROOT_DIR chmod -R a+rX $INSTALL_ROOT_DIR -mkdir -p $TELEGRAF_LOG_DIR -chown -R -L telegraf:telegraf $TELEGRAF_LOG_DIR EOF echo "Post-install script created successfully at $POST_INSTALL_PATH" } @@ -213,13 +228,19 @@ done echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION" cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts - if [ $? -ne 0 ]; then echo "Failed to copy init.d script to packaging directory -- aborting." cleanup_exit 1 fi echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" +cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts +if [ $? -ne 0 ]; then + echo "Failed to copy systemd file to packaging directory -- aborting." + cleanup_exit 1 +fi +echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" + cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/telegraf.conf if [ $? -ne 0 ]; then echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." 
diff --git a/scripts/telegraf.service b/scripts/telegraf.service index a5a764fef..87fcd0f22 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -8,6 +8,8 @@ EnvironmentFile=-/etc/default/telegraf User=telegraf ExecStart=/opt/telegraf/telegraf -config /etc/opt/telegraf/telegraf.conf $TELEGRAF_OPTS Restart=on-failure +KillMode=process [Install] WantedBy=multi-user.target +Alias=telegraf.service From 0780ad4ad9fcea2d90a8610e94ca941ba2249d50 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Sep 2015 11:00:19 -0600 Subject: [PATCH 046/125] README updates for systemd and deb/rpm install --- CHANGELOG.md | 9 ++++- CONTRIBUTING.md | 31 ++++++++++++++++ README.md | 95 +++++++++++++++++++------------------------------ 3 files changed, 76 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26e307908..1222c9728 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,11 @@ -## v0.1.8 [unreleased] +## v0.1.9 [unreleased] + +### Features + +### Bugfixes +- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support + +## v0.1.8 [2015-09-04] ### Release Notes - Telegraf will now write data in UTC at second precision by default diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c28e52131..3e426594e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -128,3 +128,34 @@ func init() { ## Outputs TODO: this section will describe requirements for contributing an output + +## Unit Tests + +### Execute short tests + +execute `make test-short` + +### Execute long tests + +As Telegraf collects metrics from several third-party services it becomes a +difficult task to mock each service as some of them have complicated protocols +which would take some time to replicate. + +To overcome this situation we've decided to use docker containers to provide a +fast and reproducible environment to test those services which require it. 
+For other situations +(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go ) +a simple mock will suffice. + +To execute Telegraf tests follow these simple steps: + +- Install docker compose following [these](https://docs.docker.com/compose/install/) +instructions + - mac users should be able to simply do `brew install boot2docker` + and `brew install docker-compose` +- execute `make test` + +### Unit test troubleshooting + +Try cleaning up your test environment by executing `make test-cleanup` and +re-running diff --git a/README.md b/README.md index a5c7c921d..748fd4890 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,58 @@ # Telegraf - A native agent for InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/telegraf.svg?style=svg)](https://circleci.com/gh/influxdb/telegraf) Telegraf is an agent written in Go for collecting metrics from the system it's -running on or from other services and writing them into InfluxDB. +running on, or from other services, and writing them into InfluxDB. Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics -from well known services (like Hadoop, or Postgres, or Redis) and third party +from well known services (like Hadoop, Postgres, or Redis) and third party APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). We'll eagerly accept pull requests for new plugins and will manage the set of -plugins that Telegraf supports. See the bottom of this doc for instructions on +plugins that Telegraf supports. See the +[contributing guide](CONTRIBUTING.md) for instructions on writing new plugins. -## Quickstart +## Installation: -* Build from source or download telegraf: - -### Linux packages for Debian/Ubuntu and RHEL/CentOS: - -NOTE: version 0.1.4+ has introduced some breaking changes! A 0.1.4+ telegraf -agent is NOT backwards-compatible with a config file from 0.1.3 and below. 
-That being said, the difference is not huge, see below for an example on -how to setup the new config file. - -As well, due to a breaking change to the InfluxDB integer line-protocol, there +Due to a breaking change to the InfluxDB integer line-protocol, there are some InfluxDB compatibility requirements: -* InfluxDB 0.9.3+ (including nightly builds) requires Telegraf 0.1.5+ +* InfluxDB 0.9.3+ requires Telegraf 0.1.5+ * InfluxDB 0.9.2 and prior requires Telegraf 0.1.4 +### Linux deb and rpm packages: + Latest: * http://get.influxdb.org/telegraf/telegraf_0.1.8_amd64.deb * http://get.influxdb.org/telegraf/telegraf-0.1.8-1.x86_64.rpm -Binaries: +0.1.4: +* http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb +* http://get.influxdb.org/telegraf/telegraf-0.1.4-1.x86_64.rpm + +##### Package instructions: + +* Telegraf binary is installed in `/opt/telegraf/telegraf` +* Telegraf daemon configuration file is in `/etc/opt/telegraf/telegraf.conf` +* On sysv systems, the telegraf daemon can be controlled via +`service telegraf [action]` +* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be +controlled via `systemctl [action] telegraf` + +### Linux binaries: + +Latest: * http://get.influxdb.org/telegraf/telegraf_linux_amd64_0.1.8.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_386_0.1.8.tar.gz * http://get.influxdb.org/telegraf/telegraf_linux_arm_0.1.8.tar.gz -0.1.4: -* http://get.influxdb.org/telegraf/telegraf_0.1.4_amd64.deb -* http://get.influxdb.org/telegraf/telegraf-0.1.4-1.x86_64.rpm +##### Binary instructions: + +These are standalone binaries that can be unpacked and executed on any linux +system. They can be unpacked and renamed in a location such as +`/usr/local/bin` for convenience. A config file will need to be generated, +see "How to use it" below. ### OSX via Homebrew: @@ -62,19 +74,17 @@ if you don't have it already. 
You also must build with golang version 1.4+ ### How to use it: -* Run `telegraf -sample-config > telegraf.toml` to create an initial configuration +* Run `telegraf -sample-config > telegraf.conf` to create an initial configuration * Edit the configuration to match your needs -* Run `telegraf -config telegraf.toml -test` to output one full measurement sample to STDOUT -* Run `telegraf -config telegraf.toml` to gather and send metrics to configured outputs. -* Run `telegraf -config telegraf.toml -filter system:swap` +* Run `telegraf -config telegraf.conf -test` to output one full measurement sample to STDOUT +* Run `telegraf -config telegraf.conf` to gather and send metrics to configured outputs. +* Run `telegraf -config telegraf.conf -filter system:swap` to enable only the system & swap plugins defined in the config. ## Telegraf Options Telegraf has a few options you can configure under the `agent` section of the -config. If you don't see an `agent` section run -`telegraf -sample-config > telegraf.toml` to create a valid initial -configuration: +config. * **hostname**: The hostname is passed as a tag. By default this will be the value retured by `hostname` on the machine running Telegraf. @@ -194,36 +204,5 @@ found by running `telegraf -sample-config` ## Contributing Please see the -[contributing guide](https://github.com/influxdb/telegraf/blob/master/CONTRIBUTING.md) +[contributing guide](CONTRIBUTING.md) for details on contributing a plugin or output to Telegraf - -## Testing - -### Execute short tests - -execute `make test-short` - -### Execute long tests - -As Telegraf collects metrics from several third-party services it becomes a -difficult task to mock each service as some of them have complicated protocols -which would take some time to replicate. - -To overcome this situation we've decided to use docker containers to provide a -fast and reproducible environment to test those services which require it. 
-For other situations -(i.e: https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go ) -a simple mock will suffice. - -To execute Telegraf tests follow these simple steps: - -- Install docker compose following [these](https://docs.docker.com/compose/install/) -instructions - - mac users should be able to simply do `brew install boot2docker` - and `brew install docker-compose` -- execute `make test` - -### Unit test troubleshooting - -Try cleaning up your test environment by executing `make test-cleanup` and -re-running From a9b97c7a2bf30ac717663c3484e089761749bf70 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Sep 2015 12:07:58 -0600 Subject: [PATCH 047/125] Bump go version number to 1.5 --- package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.sh b/package.sh index fd7055be7..406e7fce3 100755 --- a/package.sh +++ b/package.sh @@ -50,7 +50,7 @@ MAINTAINER=support@influxdb.com VENDOR=InfluxDB DESCRIPTION="InfluxDB Telegraf agent" PKG_DEPS=(coreutils) -GO_VERSION="go1.4.2" +GO_VERSION="go1.5" GOPATH_INSTALL= BINS=( telegraf From a7ed46160ad68937a3c1385d28d736352e1fe883 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Sep 2015 12:19:07 -0600 Subject: [PATCH 048/125] Re-arrange repo files for root dir cleanup --- CONTRIBUTING.md | 6 ++++-- Makefile | 7 ++++--- circle.yml | 2 +- Vagrantfile => scripts/Vagrantfile | 2 +- circle-test.sh => scripts/circle-test.sh | 0 docker-compose.yml => scripts/docker-compose.yml | 0 package.sh => scripts/package.sh | 2 +- 7 files changed, 11 insertions(+), 8 deletions(-) rename Vagrantfile => scripts/Vagrantfile (94%) rename circle-test.sh => scripts/circle-test.sh (100%) rename docker-compose.yml => scripts/docker-compose.yml (100%) rename package.sh => scripts/package.sh (99%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3e426594e..c08c50b5b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -151,10 +151,12 @@ To execute Telegraf tests follow these 
simple steps: - Install docker compose following [these](https://docs.docker.com/compose/install/) instructions - - mac users should be able to simply do `brew install boot2docker` - and `brew install docker-compose` - execute `make test` +**OSX users**: you will need to install `boot2docker` or `docker-machine`. +The Makefile will assume that you have a `docker-machine` box called `default` to +get the IP address. + ### Unit test troubleshooting Try cleaning up your test environment by executing `make test-cleanup` and diff --git a/Makefile b/Makefile index 89db1d5b0..90443b2f9 100644 --- a/Makefile +++ b/Makefile @@ -22,10 +22,11 @@ prepare: docker-compose: ifeq ($(UNAME), Darwin) - ADVERTISED_HOST=$(shell sh -c 'boot2docker ip') docker-compose up -d + ADVERTISED_HOST=$(shell sh -c 'boot2docker ip || docker-machine ip default') \ + docker-compose --file scripts/docker-compose.yml up -d endif ifeq ($(UNAME), Linux) - ADVERTISED_HOST=localhost docker-compose up -d + ADVERTISED_HOST=localhost docker-compose --file scripts/docker-compose.yml up -d endif test: prepare docker-compose @@ -35,6 +36,6 @@ test-short: prepare $(GOPATH)/bin/godep go test -short ./... 
test-cleanup: - docker-compose kill + docker-compose --file scripts/docker-compose.yml kill .PHONY: test diff --git a/circle.yml b/circle.yml index 2c82bcfa7..0dcd6fc9f 100644 --- a/circle.yml +++ b/circle.yml @@ -4,5 +4,5 @@ dependencies: test: override: - - bash circle-test.sh + - bash scripts/circle-test.sh diff --git a/Vagrantfile b/scripts/Vagrantfile similarity index 94% rename from Vagrantfile rename to scripts/Vagrantfile index 72124a8ac..3c0199bdb 100644 --- a/Vagrantfile +++ b/scripts/Vagrantfile @@ -7,7 +7,7 @@ VAGRANTFILE_API_VERSION = "2" Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "ubuntu/trusty64" - config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/influxdb/telegraf", + config.vm.synced_folder "..", "/home/vagrant/go/src/github.com/influxdb/telegraf", type: "rsync", rsync__args: ["--verbose", "--archive", "--delete", "-z", "--safe-links"], rsync__exclude: ["./telegraf", ".vagrant/"] diff --git a/circle-test.sh b/scripts/circle-test.sh similarity index 100% rename from circle-test.sh rename to scripts/circle-test.sh diff --git a/docker-compose.yml b/scripts/docker-compose.yml similarity index 100% rename from docker-compose.yml rename to scripts/docker-compose.yml diff --git a/package.sh b/scripts/package.sh similarity index 99% rename from package.sh rename to scripts/package.sh index 406e7fce3..7f5196e33 100755 --- a/package.sh +++ b/scripts/package.sh @@ -204,7 +204,7 @@ if [ "$1" == "-h" ]; then fi VERSION=`git describe --always --tags | tr -d v` - +cd `git rev-parse --show-toplevel` echo -e "\nStarting package process, version: $VERSION\n" if [ "$CIRCLE_BRANCH" == "" ]; then From 3c7c8926fb436e683c6a74201b1c90f390ef419f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Sep 2015 15:56:10 -0600 Subject: [PATCH 049/125] Support InfluxDB clusters Closes #143 --- CHANGELOG.md | 5 ++ agent.go | 3 ++ etc/config.sample.toml | 2 +- outputs/influxdb/influxdb.go | 95 ++++++++++++++++++++++++++---------- 4 files 
changed, 78 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1222c9728..77c8dac8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,11 @@ ## v0.1.9 [unreleased] +### Release Notes +- InfluxDB output config change: `url` is now `urls`, and is a list. Config files +will still be backwards compatible if only `url` is specified. + ### Features +- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support ### Bugfixes - [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support diff --git a/agent.go b/agent.go index 2bfa0cf8f..1f40eb04b 100644 --- a/agent.go +++ b/agent.go @@ -84,6 +84,9 @@ func NewAgent(config *Config) (*Agent, error) { // Connect connects to all configured outputs func (a *Agent) Connect() error { for _, o := range a.outputs { + if a.Debug { + log.Printf("Attempting connection to output: %s\n", o.name) + } err := o.output.Connect() if err != nil { return err diff --git a/etc/config.sample.toml b/etc/config.sample.toml index 5307b6df8..58c851bec 100644 --- a/etc/config.sample.toml +++ b/etc/config.sample.toml @@ -37,7 +37,7 @@ [outputs] [outputs.influxdb] # The full HTTP endpoint URL for your InfluxDB instance - url = "http://localhost:8086" # required. + urls = ["http://localhost:8086"] # required. # The target database for metrics. This database must already exist database = "telegraf" # required. 
diff --git a/outputs/influxdb/influxdb.go b/outputs/influxdb/influxdb.go index 5bb74b4e3..c47b6cd8c 100644 --- a/outputs/influxdb/influxdb.go +++ b/outputs/influxdb/influxdb.go @@ -1,8 +1,10 @@ package influxdb import ( + "errors" "fmt" "log" + "math/rand" "net/url" "strings" @@ -12,19 +14,23 @@ import ( ) type InfluxDB struct { + // URL is only for backwards compatability URL string + URLs []string `toml:"urls"` Username string Password string Database string UserAgent string Timeout t.Duration - conn *client.Client + conns []*client.Client } var sampleConfig = ` # The full HTTP endpoint URL for your InfluxDB instance - url = "http://localhost:8086" # required. + # Multiple urls can be specified for InfluxDB cluster support. Server to + # write to will be randomly chosen each interval. + urls = ["http://localhost:8086"] # required. # The target database for metrics. This database must already exist database = "telegraf" # required. @@ -42,33 +48,58 @@ var sampleConfig = ` ` func (i *InfluxDB) Connect() error { - u, err := url.Parse(i.URL) - if err != nil { - return err + var urls []*url.URL + for _, URL := range i.URLs { + u, err := url.Parse(URL) + if err != nil { + return err + } + urls = append(urls, u) } - c, err := client.NewClient(client.Config{ - URL: *u, - Username: i.Username, - Password: i.Password, - UserAgent: i.UserAgent, - Timeout: i.Timeout.Duration, - }) - - if err != nil { - return err + // Backward-compatability with single Influx URL config files + // This could eventually be removed in favor of specifying the urls as a list + if i.URL != "" { + u, err := url.Parse(i.URL) + if err != nil { + return err + } + urls = append(urls, u) } - _, err = c.Query(client.Query{ - Command: fmt.Sprintf("CREATE DATABASE %s", i.Database), - }) - - if err != nil && !strings.Contains(err.Error(), "database already exists") { - log.Fatal(err) + var conns []*client.Client + for _, parsed_url := range urls { + c, err := client.NewClient(client.Config{ + URL: 
*parsed_url, + Username: i.Username, + Password: i.Password, + UserAgent: i.UserAgent, + Timeout: i.Timeout.Duration, + }) + if err != nil { + return err + } + conns = append(conns, c) } - i.conn = c - return nil + // This will get set to nil if a successful connection is made + err := errors.New("Could not create database on any server") + + for _, conn := range conns { + _, e := conn.Query(client.Query{ + Command: fmt.Sprintf("CREATE DATABASE %s", i.Database), + }) + + if e != nil && !strings.Contains(e.Error(), "database already exists") { + log.Println("ERROR: " + e.Error()) + } else { + err = nil + break + } + } + + i.conns = conns + return err } func (i *InfluxDB) Close() error { @@ -84,12 +115,24 @@ func (i *InfluxDB) Description() string { return "Configuration for influxdb server to send metrics to" } +// Choose a random server in the cluster to write to until a successful write +// occurs, logging each unsuccessful. If all servers fail, return error. func (i *InfluxDB) Write(bp client.BatchPoints) error { bp.Database = i.Database - if _, err := i.conn.Write(bp); err != nil { - return err + + // This will get set to nil if a successful write occurs + err := errors.New("Could not write to any InfluxDB server in cluster") + + p := rand.Perm(len(i.conns)) + for _, n := range p { + if _, e := i.conns[n].Write(bp); e != nil { + log.Println("ERROR: " + e.Error()) + } else { + err = nil + break + } } - return nil + return err } func init() { From 81f4aa9a5dcd8746e370f746ec66954099badcaf Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 9 Sep 2015 21:27:58 -0600 Subject: [PATCH 050/125] Fix bug in setting the precision before gathering metrics Closes #175 --- CHANGELOG.md | 1 + accumulator.go | 6 +++++- agent.go | 17 +++++++++-------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77c8dac8f..8477ba494 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ will still be backwards compatible if only `url` 
is specified. ### Bugfixes - [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support +- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics ## v0.1.8 [2015-09-04] diff --git a/accumulator.go b/accumulator.go index b756cc16e..ea90b6620 100644 --- a/accumulator.go +++ b/accumulator.go @@ -25,7 +25,11 @@ type BatchPoints struct { } // Add adds a measurement -func (bp *BatchPoints) Add(measurement string, val interface{}, tags map[string]string) { +func (bp *BatchPoints) Add( + measurement string, + val interface{}, + tags map[string]string, +) { bp.mu.Lock() defer bp.mu.Unlock() diff --git a/agent.go b/agent.go index 1f40eb04b..12533d286 100644 --- a/agent.go +++ b/agent.go @@ -196,16 +196,17 @@ func (a *Agent) crankParallel() error { go func(plugin *runningPlugin) { defer wg.Done() - var acc BatchPoints - acc.Debug = a.Debug - acc.Prefix = plugin.name + "_" - acc.Config = plugin.config + var bp BatchPoints + bp.Debug = a.Debug + bp.Prefix = plugin.name + "_" + bp.Config = plugin.config + bp.Precision = a.Precision - if err := plugin.plugin.Gather(&acc); err != nil { + if err := plugin.plugin.Gather(&bp); err != nil { log.Printf("Error in plugin [%s]: %s", plugin.name, err) } - points <- &acc + points <- &bp }(plugin) } @@ -233,6 +234,7 @@ func (a *Agent) crank() error { var bp BatchPoints bp.Debug = a.Debug + bp.Precision = a.Precision for _, plugin := range a.plugins { bp.Prefix = plugin.name + "_" @@ -248,7 +250,6 @@ func (a *Agent) crank() error { if a.UTC { bp.Time = bp.Time.UTC() } - bp.Precision = a.Precision return a.flush(&bp) } @@ -266,6 +267,7 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, plugin *runningPlugin) err bp.Prefix = plugin.name + "_" bp.Config = plugin.config + bp.Precision = a.Precision if err := plugin.plugin.Gather(&bp); err != nil { log.Printf("Error in plugin [%s]: %s", plugin.name, err) @@ -277,7 +279,6 @@ func (a *Agent) crankSeparate(shutdown chan struct{}, 
plugin *runningPlugin) err if a.UTC { bp.Time = bp.Time.UTC() } - bp.Precision = a.Precision if err := a.flush(&bp); err != nil { outerr = errors.New("Error encountered processing plugins & outputs") From a55f6498c86838412769f1991cf9ca5fabc80ca2 Mon Sep 17 00:00:00 2001 From: Vye Wilson Date: Thu, 10 Sep 2015 10:01:08 -0700 Subject: [PATCH 051/125] Makefile will now honor GOBIN, if set Closes #181 --- CHANGELOG.md | 1 + Makefile | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8477ba494..5cea99e8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ will still be backwards compatible if only `url` is specified. ### Features - [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support +- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! ### Bugfixes - [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support diff --git a/Makefile b/Makefile index 90443b2f9..a907f468d 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,22 @@ UNAME := $(shell sh -c 'uname') VERSION := $(shell sh -c 'git describe --always --tags') +ifndef GOBIN + GOBIN = $(GOPATH)/bin +endif build: prepare - $(GOPATH)/bin/godep go build -o telegraf -ldflags \ + $(GOBIN)/godep go build -o telegraf -ldflags \ "-X main.Version $(VERSION)" \ ./cmd/telegraf/telegraf.go build-linux-bins: prepare - GOARCH=amd64 GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_amd64 \ + GOARCH=amd64 GOOS=linux $(GOBIN)/godep go build -o telegraf_linux_amd64 \ -ldflags "-X main.Version $(VERSION)" \ ./cmd/telegraf/telegraf.go - GOARCH=386 GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_386 \ + GOARCH=386 GOOS=linux $(GOBIN)/godep go build -o telegraf_linux_386 \ -ldflags "-X main.Version $(VERSION)" \ ./cmd/telegraf/telegraf.go - GOARCH=arm GOOS=linux $(GOPATH)/bin/godep go build -o telegraf_linux_arm \ + GOARCH=arm GOOS=linux $(GOBIN)/godep go build -o 
telegraf_linux_arm \ -ldflags "-X main.Version $(VERSION)" \ ./cmd/telegraf/telegraf.go @@ -30,10 +33,10 @@ ifeq ($(UNAME), Linux) endif test: prepare docker-compose - $(GOPATH)/bin/godep go test -v ./... + $(GOBIN)/godep go test -v ./... test-short: prepare - $(GOPATH)/bin/godep go test -short ./... + $(GOBIN)/godep go test -short ./... test-cleanup: docker-compose --file scripts/docker-compose.yml kill From f7a43179904510e8aae8fb8a296d6e7b1011e5a1 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 10 Sep 2015 11:21:40 -0600 Subject: [PATCH 052/125] Fix multiple redis server bug, do not cache the TCP connections Fixes #178 --- CHANGELOG.md | 1 + plugins/redis/redis.go | 63 +++++++++++++++++++----------------------- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5cea99e8d..e0888ec75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ will still be backwards compatible if only `url` is specified. ### Bugfixes - [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics +- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug ## v0.1.8 [2015-09-04] diff --git a/plugins/redis/redis.go b/plugins/redis/redis.go index 4a95772aa..30077bc4f 100644 --- a/plugins/redis/redis.go +++ b/plugins/redis/redis.go @@ -15,9 +15,6 @@ import ( type Redis struct { Servers []string - - c net.Conn - buf []byte } var sampleConfig = ` @@ -112,41 +109,37 @@ func (r *Redis) Gather(acc plugins.Accumulator) error { const defaultPort = "6379" func (r *Redis) gatherServer(addr *url.URL, acc plugins.Accumulator) error { - if r.c == nil { - - _, _, err := net.SplitHostPort(addr.Host) - if err != nil { - addr.Host = addr.Host + ":" + defaultPort - } - - c, err := net.Dial("tcp", addr.Host) - if err != nil { - return fmt.Errorf("Unable to connect to redis server '%s': 
%s", addr.Host, err) - } - - if addr.User != nil { - pwd, set := addr.User.Password() - if set && pwd != "" { - c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) - - rdr := bufio.NewReader(c) - - line, err := rdr.ReadString('\n') - if err != nil { - return err - } - if line[0] != '+' { - return fmt.Errorf("%s", strings.TrimSpace(line)[1:]) - } - } - } - - r.c = c + _, _, err := net.SplitHostPort(addr.Host) + if err != nil { + addr.Host = addr.Host + ":" + defaultPort } - r.c.Write([]byte("info\r\n")) + c, err := net.Dial("tcp", addr.Host) + if err != nil { + return fmt.Errorf("Unable to connect to redis server '%s': %s", addr.Host, err) + } + defer c.Close() - rdr := bufio.NewReader(r.c) + if addr.User != nil { + pwd, set := addr.User.Password() + if set && pwd != "" { + c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) + + rdr := bufio.NewReader(c) + + line, err := rdr.ReadString('\n') + if err != nil { + return err + } + if line[0] != '+' { + return fmt.Errorf("%s", strings.TrimSpace(line)[1:]) + } + } + } + + c.Write([]byte("info\r\n")) + + rdr := bufio.NewReader(c) line, err := rdr.ReadString('\n') if err != nil { From d8482cc286ad4afc78cc707702b5494450479d4a Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 10 Sep 2015 13:57:19 -0600 Subject: [PATCH 053/125] darwin net plugin fix, really need to godep vendor gopsutil --- CHANGELOG.md | 1 + plugins/system/ps/net/net_darwin.go | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0888ec75..4973337c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ will still be backwards compatible if only `url` is specified. 
- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug +- Fix net plugin on darwin ## v0.1.8 [2015-09-04] diff --git a/plugins/system/ps/net/net_darwin.go b/plugins/system/ps/net/net_darwin.go index a9843fea3..7fb21713c 100644 --- a/plugins/system/ps/net/net_darwin.go +++ b/plugins/system/ps/net/net_darwin.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/influxdb/telegraf/plugins/system/ps/common" + "github.com/shirou/gopsutil/common" ) func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { @@ -26,7 +26,7 @@ func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { // skip first line continue } - if common.StringContains(exists, values[0]) { + if common.StringsHas(exists, values[0]) { // skip if already get continue } @@ -38,11 +38,14 @@ func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { base = 0 } - parsed := make([]uint64, 0, 3) + parsed := make([]uint64, 0, 6) vv := []string{ - values[base+3], // PacketsRecv - values[base+4], // Errin - values[base+5], // Dropin + values[base+3], // Ipkts == PacketsRecv + values[base+4], // Ierrs == Errin + values[base+5], // Ibytes == BytesRecv + values[base+6], // Opkts == PacketsSent + values[base+7], // Oerrs == Errout + values[base+8], // Obytes == BytesSent } for _, target := range vv { if target == "-" { @@ -61,7 +64,10 @@ func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { Name: values[0], PacketsRecv: parsed[0], Errin: parsed[1], - Dropin: parsed[2], + BytesRecv: parsed[2], + PacketsSent: parsed[3], + Errout: parsed[4], + BytesSent: parsed[5], } ret = append(ret, n) } From bd00f46d8b239d24d30e96ed1c69c50d2f0e0334 Mon Sep 17 00:00:00 2001 From: Ruslan Islamgaliev Date: Thu, 10 Sep 2015 14:27:50 +0300 Subject: [PATCH 054/125] Fix docker stats to 
make it work on centos 7. issue #58 issue #84 --- CHANGELOG.md | 1 + plugins/system/ps/docker/docker_linux.go | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4973337c9..2cfaefda5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ will still be backwards compatible if only `url` is specified. - [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug - Fix net plugin on darwin +- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! ## v0.1.8 [2015-09-04] diff --git a/plugins/system/ps/docker/docker_linux.go b/plugins/system/ps/docker/docker_linux.go index 51f3bc9ca..4c274af28 100644 --- a/plugins/system/ps/docker/docker_linux.go +++ b/plugins/system/ps/docker/docker_linux.go @@ -4,6 +4,7 @@ package docker import ( "encoding/json" + "os" "os/exec" "path" "strconv" @@ -48,9 +49,13 @@ func CgroupCPU(containerid string, base string) (*cpu.CPUTimesStat, error) { if len(base) == 0 { base = "/sys/fs/cgroup/cpuacct/docker" } - path := path.Join(base, containerid, "cpuacct.stat") + statfile := path.Join(base, containerid, "cpuacct.stat") - lines, err := common.ReadLines(path) + if _, err := os.Stat(statfile); os.IsNotExist(err) { + statfile = path.Join("/sys/fs/cgroup/cpuacct/system.slice", "docker-" + containerid + ".scope", "cpuacct.stat") + } + + lines, err := common.ReadLines(statfile) if err != nil { return nil, err } @@ -86,12 +91,17 @@ func CgroupMem(containerid string, base string) (*CgroupMemStat, error) { if len(base) == 0 { base = "/sys/fs/cgroup/memory/docker" } - path := path.Join(base, containerid, "memory.stat") + statfile := path.Join(base, containerid, "memory.stat") + + if _, err := os.Stat(statfile); os.IsNotExist(err) { + statfile = 
path.Join("/sys/fs/cgroup/memory/system.slice", "docker-" + containerid + ".scope", "memory.stat") + } + // empty containerid means all cgroup if len(containerid) == 0 { containerid = "all" } - lines, err := common.ReadLines(path) + lines, err := common.ReadLines(statfile) if err != nil { return nil, err } From 11126cf4aedddd6089fa01d09bb1d765a8813f63 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Fri, 11 Sep 2015 16:21:43 -0700 Subject: [PATCH 055/125] Add a server name tag to the RabbitMQ server list Fixes #183 --- plugins/rabbitmq/rabbitmq.go | 9 +++++++-- plugins/system/ps/net/net_darwin.go | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/plugins/rabbitmq/rabbitmq.go b/plugins/rabbitmq/rabbitmq.go index 55b4b0a95..506e932ad 100644 --- a/plugins/rabbitmq/rabbitmq.go +++ b/plugins/rabbitmq/rabbitmq.go @@ -14,6 +14,7 @@ const DefaultURL = "http://localhost:15672" type Server struct { URL string + Name string Username string Password string Nodes []string @@ -70,6 +71,7 @@ type Node struct { var sampleConfig = ` # Specify servers via an array of tables [[rabbitmq.servers]] + # name = "rmq-server-1" # optional tag # url = "http://localhost:15672" # username = "guest" # password = "guest" @@ -117,7 +119,10 @@ func (r *RabbitMQ) gatherServer(serv *Server, acc plugins.Accumulator) error { return err } - tags := map[string]string{} + tags := map[string]string{"url": serv.URL} + if serv.Name != "" { + tags["name"] = serv.Name + } acc.Add("messages", overview.QueueTotals.Messages, tags) acc.Add("messages_ready", overview.QueueTotals.MessagesReady, tags) @@ -147,7 +152,7 @@ func (r *RabbitMQ) gatherServer(serv *Server, acc plugins.Accumulator) error { continue } - tags = map[string]string{"node": node.Name} + tags["node"] = node.Name acc.Add("disk_free", node.DiskFree, tags) acc.Add("disk_free_limit", node.DiskFreeLimit, tags) diff --git a/plugins/system/ps/net/net_darwin.go b/plugins/system/ps/net/net_darwin.go index 7fb21713c..0c518a736 100644 
--- a/plugins/system/ps/net/net_darwin.go +++ b/plugins/system/ps/net/net_darwin.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/common" + "github.com/influxdb/telegraf/plugins/system/ps/common" ) func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { @@ -26,7 +26,7 @@ func NetIOCounters(pernic bool) ([]NetIOCountersStat, error) { // skip first line continue } - if common.StringsHas(exists, values[0]) { + if common.StringContains(exists, values[0]) { // skip if already get continue } From 6d6158ff08ce4c4aa597d09d98f6e34911b09d67 Mon Sep 17 00:00:00 2001 From: mced Date: Sat, 12 Sep 2015 19:11:40 +0200 Subject: [PATCH 056/125] [fix] mem_used_perc returns percentage of used mem Closes #189 --- CHANGELOG.md | 1 + plugins/system/ps/mem/mem_linux.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cfaefda5..3519407dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ will still be backwards compatible if only `url` is specified. - [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug - Fix net plugin on darwin - [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! +- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! 
## v0.1.8 [2015-09-04] diff --git a/plugins/system/ps/mem/mem_linux.go b/plugins/system/ps/mem/mem_linux.go index 42a49a2b6..b2519ddd5 100644 --- a/plugins/system/ps/mem/mem_linux.go +++ b/plugins/system/ps/mem/mem_linux.go @@ -45,7 +45,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { } ret.Available = ret.Free + ret.Buffers + ret.Cached ret.Used = ret.Total - ret.Free - ret.UsedPercent = float64(ret.Total-ret.Available) / float64(ret.Total) * 100.0 + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 return ret, nil } From 2ee7d5eeb614cbb6ee966459eb6e6db28859855e Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 20:34:36 +0200 Subject: [PATCH 057/125] code improvements after running tests / compile step --- outputs/opentsdb/opentsdb.go | 151 +++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 outputs/opentsdb/opentsdb.go diff --git a/outputs/opentsdb/opentsdb.go b/outputs/opentsdb/opentsdb.go new file mode 100644 index 000000000..1c29e731d --- /dev/null +++ b/outputs/opentsdb/opentsdb.go @@ -0,0 +1,151 @@ +package opentsdb + +import ( + "fmt" + "net" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdb/influxdb/client" + "github.com/influxdb/telegraf/outputs" +) + +type OpenTSDB struct { + Prefix string + + Host string + Port int +} + +var sampleConfig = ` + # prefix for metrics keys + prefix = "my.specific.prefix." 
+ + ## Telnet Mode ## + # DNS name of the OpenTSDB server in telnet mode + host = "opentsdb.example.com" + + # Port of the OpenTSDB server in telnet mode + port = 4242 +` + +type MetricLine struct { + Metric string + Timestamp int64 + Value string + Tags string +} + +func (o *OpenTSDB) Connect() error { + // Test Connection to OpenTSDB Server + uri := fmt.Sprintf("%s:%d", o.Host, o.Port) + tcpAddr, err := net.ResolveTCPAddr("tcp", uri) + if err != nil { + return fmt.Errorf("OpenTSDB: TCP address cannot be resolved") + } + connection, err := net.DialTCP("tcp", nil, tcpAddr) + defer connection.Close() + if err != nil { + return fmt.Errorf("OpenTSDB: Telnet connect fail") + } + return nil +} + +func (o *OpenTSDB) Write(bp client.BatchPoints) error { + if len(bp.Points) == 0 { + return nil + } + var timeNow = time.Now() + // Send Data with telnet / socket communication + uri := fmt.Sprintf("%s:%d", o.Host, o.Port) + tcpAddr, _ := net.ResolveTCPAddr("tcp", uri) + connection, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + return fmt.Errorf("OpenTSDB: Telnet connect fail") + } + for _, pt := range bp.Points { + metric := &MetricLine{ + Metric: fmt.Sprintf("%s%s", o.Prefix, pt.Measurement), + Timestamp: timeNow.Unix(), + } + if metricValue, err := buildValue(bp, pt); err == nil { + metric.Value = metricValue + } + + tagsSlice := buildTags(bp.Tags, pt.Tags) + metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) + + messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags) + fmt.Print(messageLine) + _, err := connection.Write([]byte(messageLine)) + if err != nil { + fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) + } + } + defer connection.Close() + + return nil +} + +func buildTags(bpTags map[string]string, ptTags map[string]string) []string { + tags := make([]string, (len(bpTags) + len(ptTags))) + index := 0 + for k, v := range bpTags { + tags[index] = fmt.Sprintf("%s=%s", k, v) + index += 1 + } 
+ for k, v := range ptTags { + tags[index] = fmt.Sprintf("%s=%s", k, v) + index += 1 + } + sort.Strings(tags) + return tags +} + +func buildValue(bp client.BatchPoints, pt client.Point) (string, error) { + var retv string + var v = pt.Fields["value"] + switch p := v.(type) { + case int64: + retv = IntToString(int64(p)) + case uint64: + retv = UIntToString(uint64(p)) + case float64: + retv = FloatToString(float64(p)) + default: + return retv, fmt.Errorf("undeterminable type for telegraf") + } + return retv, nil +} + +func IntToString(input_num int64) string { + return strconv.FormatInt(input_num, 10) +} + +func UIntToString(input_num uint64) string { + return strconv.FormatUint(input_num, 10) +} + +func FloatToString(input_num float64) string { + return strconv.FormatFloat(input_num, 'f', 6, 64) +} + +func (o *OpenTSDB) SampleConfig() string { + return sampleConfig +} + +func (o *OpenTSDB) Description() string { + return "Configuration for OpenTSDB server to send metrics to" +} + +func (o *OpenTSDB) Close() error { + return nil +} + +func init() { + outputs.Add("opentsdb", func() outputs.Output { + return &OpenTSDB{} + }) +} From cb887dee810ea9323290b7f3bd0a24e4c002cadf Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 20:35:12 +0200 Subject: [PATCH 058/125] change/fix expected test result --- outputs/opentsdb/opentsdb_test.go | 53 +++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 outputs/opentsdb/opentsdb_test.go diff --git a/outputs/opentsdb/opentsdb_test.go b/outputs/opentsdb/opentsdb_test.go new file mode 100644 index 000000000..bd1bd8f7e --- /dev/null +++ b/outputs/opentsdb/opentsdb_test.go @@ -0,0 +1,53 @@ +package opentsdb + +import ( + "reflect" + "testing" +) + +var ( + fakeHost = "metrics.example.com" + fakePort = 4242 +) + +func fakeOpenTSDB() *OpenTSDB { + var o OpenTSDB + o.Host = fakeHost + o.Port = fakePort + return &o +} + +func TestBuildTagsTelnet(t *testing.T) { + var tagtests = []struct { + bpIn 
map[string]string + ptIn map[string]string + outTags []string + }{ + { + map[string]string{"one": "two"}, + map[string]string{"three": "four"}, + []string{"one=two", "three=four"}, + }, + { + map[string]string{"aaa": "bbb"}, + map[string]string{}, + []string{"aaa=bbb"}, + }, + { + map[string]string{"one": "two"}, + map[string]string{"aaa": "bbb"}, + []string{"aaa=bbb", "one=two"}, + }, + { + map[string]string{}, + map[string]string{}, + []string{}, + }, + } + for _, tt := range tagtests { + tags := buildTags(tt.bpIn, tt.ptIn) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected %+v\ngot %+v\n", tt.outTags, tags) + } + } +} From 2bf096cfc7a2f9f6faaf63d6d9414a6b924911c4 Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 20:40:23 +0200 Subject: [PATCH 059/125] adds opentsdb telnet output plugin --- outputs/all/all.go | 1 + 1 file changed, 1 insertion(+) diff --git a/outputs/all/all.go b/outputs/all/all.go index 36d11ea61..8586174a5 100644 --- a/outputs/all/all.go +++ b/outputs/all/all.go @@ -4,4 +4,5 @@ import ( _ "github.com/influxdb/telegraf/outputs/datadog" _ "github.com/influxdb/telegraf/outputs/influxdb" _ "github.com/influxdb/telegraf/outputs/kafka" + _ "github.com/influxdb/telegraf/outputs/opentsdb" ) From 380146b75bc17e40976e72974c1303f9cd8e20a1 Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 20:42:53 +0200 Subject: [PATCH 060/125] added opentsdb as sink --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 748fd4890..c39431141 100644 --- a/README.md +++ b/README.md @@ -200,6 +200,7 @@ found by running `telegraf -sample-config` * influxdb * kafka * datadog +* opentsdb ## Contributing From d2150efc1974b698876ba159602306cc76623e29 Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 21:25:07 +0200 Subject: [PATCH 061/125] added readme as suggested / whished in #177 --- outputs/opentsdb/README.md | 78 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 
insertions(+) create mode 100644 outputs/opentsdb/README.md diff --git a/outputs/opentsdb/README.md b/outputs/opentsdb/README.md new file mode 100644 index 000000000..59a03d3fd --- /dev/null +++ b/outputs/opentsdb/README.md @@ -0,0 +1,78 @@ +# OpenTSDB Output Plugin + +This plugin writes to a OpenTSDB instance using the "telnet" mode + +## Transfer "Protocol" in the telnet mode + +The expected input from OpenTSDB is specified in the following way: + +``` +put +``` + +The telegraf output plugin adds an optional prefix to the metric keys so +that a subamount can be selected. + +``` +put <[prefix.]metric> +``` + +### Example + +``` +put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green +put nine.telegraf.system_load5 1441910356 0.580000 dc=homeoffice host=irimame scope=green +put nine.telegraf.system_load15 1441910356 0.730000 dc=homeoffice host=irimame scope=green +put nine.telegraf.system_uptime 1441910356 3655970.000000 dc=homeoffice host=irimame scope=green +put nine.telegraf.system_uptime_format 1441910356 dc=homeoffice host=irimame scope=green +put nine.telegraf.mem_total 1441910356 4145426432 dc=homeoffice host=irimame scope=green +... 
+put nine.telegraf.io_write_bytes 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green +put nine.telegraf.io_read_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green +put nine.telegraf.io_write_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green +put nine.telegraf.io_io_time 1441910366 0 dc=homeoffice host=irimame name=vda2 scope=green +put nine.telegraf.ping_packets_transmitted 1441910366 dc=homeoffice host=irimame scope=green url=www.google.com +put nine.telegraf.ping_packets_received 1441910366 dc=homeoffice host=irimame scope=green url=www.google.com +put nine.telegraf.ping_percent_packet_loss 1441910366 0.000000 dc=homeoffice host=irimame scope=green url=www.google.com +put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice host=irimame scope=green url=www.google.com +... +``` + +## + +The OpenTSDB interface can be simulated with this reader: + +``` +// opentsdb_telnet_mode_mock.go +package main + +import ( + "io" + "log" + "net" + "os" +) + +func main() { + l, err := net.Listen("tcp", "localhost:4242") + if err != nil { + log.Fatal(err) + } + defer l.Close() + for { + conn, err := l.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + io.Copy(os.Stdout, c) + }(conn) + } +} + +``` + +## Allowed values for metrics + +OpenTSDB allows `integers` and `floats` as input values \ No newline at end of file From 7e3beaf822826a6d9c4e0ba0313025d97ca3b88c Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Thu, 10 Sep 2015 21:27:34 +0200 Subject: [PATCH 062/125] fix spaces with gofmt --- outputs/opentsdb/opentsdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/outputs/opentsdb/opentsdb.go b/outputs/opentsdb/opentsdb.go index 1c29e731d..b547c106f 100644 --- a/outputs/opentsdb/opentsdb.go +++ b/outputs/opentsdb/opentsdb.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdb/influxdb/client" - "github.com/influxdb/telegraf/outputs" + 
"github.com/influxdb/telegraf/outputs" ) type OpenTSDB struct { From 08b220a1fbfaa4ed9af0f73caf87e4688004dcaa Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Fri, 11 Sep 2015 22:24:53 +0200 Subject: [PATCH 063/125] added docker image unit test with OpenTSDB --- outputs/opentsdb/opentsdb_test.go | 31 ++++++++++++++++++++----------- scripts/docker-compose.yml | 6 ++++++ 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/outputs/opentsdb/opentsdb_test.go b/outputs/opentsdb/opentsdb_test.go index bd1bd8f7e..774c06953 100644 --- a/outputs/opentsdb/opentsdb_test.go +++ b/outputs/opentsdb/opentsdb_test.go @@ -3,20 +3,11 @@ package opentsdb import ( "reflect" "testing" -) -var ( - fakeHost = "metrics.example.com" - fakePort = 4242 + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/require" ) -func fakeOpenTSDB() *OpenTSDB { - var o OpenTSDB - o.Host = fakeHost - o.Port = fakePort - return &o -} - func TestBuildTagsTelnet(t *testing.T) { var tagtests = []struct { bpIn map[string]string @@ -51,3 +42,21 @@ func TestBuildTagsTelnet(t *testing.T) { } } } +func TestWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + o := &OpenTSDB{ + Host: testutil.GetLocalHost() , + Port: 24242, + } + + // Verify that we can connect to the OpenTSDB instance + err := o.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to OpenTSDB + err = o.Write(testutil.MockBatchPoints()) + require.NoError(t, err) +} diff --git a/scripts/docker-compose.yml b/scripts/docker-compose.yml index c51a0235b..a41cb67f4 100644 --- a/scripts/docker-compose.yml +++ b/scripts/docker-compose.yml @@ -25,3 +25,9 @@ kafka: environment: ADVERTISED_HOST: ADVERTISED_PORT: 9092 + +opentsdb: + image: lancope/opentsdb + ports: + - "24242:4242" + From fc41cc9878333a12d7fd8b0138a3acfc92db467a Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Sun, 13 Sep 2015 22:35:38 +0200 Subject: [PATCH 064/125] added prefix 
settings of the module and rearrange go test code --- outputs/opentsdb/opentsdb_test.go | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/outputs/opentsdb/opentsdb_test.go b/outputs/opentsdb/opentsdb_test.go index 774c06953..2db064243 100644 --- a/outputs/opentsdb/opentsdb_test.go +++ b/outputs/opentsdb/opentsdb_test.go @@ -4,8 +4,8 @@ import ( "reflect" "testing" - "github.com/influxdb/telegraf/testutil" - "github.com/stretchr/testify/require" + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestBuildTagsTelnet(t *testing.T) { @@ -43,20 +43,21 @@ func TestBuildTagsTelnet(t *testing.T) { } } func TestWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } - o := &OpenTSDB{ - Host: testutil.GetLocalHost() , - Port: 24242, - } + o := &OpenTSDB{ + Host: testutil.GetLocalHost(), + Port: 24242, + Prefix: "prefix.test.", + } - // Verify that we can connect to the OpenTSDB instance - err := o.Connect() - require.NoError(t, err) + // Verify that we can connect to the OpenTSDB instance + err := o.Connect() + require.NoError(t, err) - // Verify that we can successfully write data to OpenTSDB - err = o.Write(testutil.MockBatchPoints()) - require.NoError(t, err) + // Verify that we can successfully write data to OpenTSDB + err = o.Write(testutil.MockBatchPoints()) + require.NoError(t, err) } From 9a0c0886ced9a83479f1a319b56d4fe19551e813 Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Mon, 14 Sep 2015 12:28:10 +0200 Subject: [PATCH 065/125] added more UNIT test cases for covering all parts of the code added debug statement for debugging OpenTSDB communication Closes #182 --- outputs/opentsdb/opentsdb.go | 18 +++++++++++++---- outputs/opentsdb/opentsdb_test.go | 32 +++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git 
a/outputs/opentsdb/opentsdb.go b/outputs/opentsdb/opentsdb.go index b547c106f..0060da8d0 100644 --- a/outputs/opentsdb/opentsdb.go +++ b/outputs/opentsdb/opentsdb.go @@ -17,6 +17,8 @@ type OpenTSDB struct { Host string Port int + + Debug bool } var sampleConfig = ` @@ -29,6 +31,9 @@ var sampleConfig = ` # Port of the OpenTSDB server in telnet mode port = 4242 + + # Debug true - Prints OpenTSDB communication + debug = false ` type MetricLine struct { @@ -70,15 +75,20 @@ func (o *OpenTSDB) Write(bp client.BatchPoints) error { Metric: fmt.Sprintf("%s%s", o.Prefix, pt.Measurement), Timestamp: timeNow.Unix(), } - if metricValue, err := buildValue(bp, pt); err == nil { - metric.Value = metricValue + metricValue, buildError := buildValue(bp, pt) + if buildError != nil { + fmt.Printf("OpenTSDB: %s\n", buildError.Error()) + continue } + metric.Value = metricValue tagsSlice := buildTags(bp.Tags, pt.Tags) metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) messageLine := fmt.Sprintf("put %s %v %s %s\n", metric.Metric, metric.Timestamp, metric.Value, metric.Tags) - fmt.Print(messageLine) + if o.Debug { + fmt.Print(messageLine) + } _, err := connection.Write([]byte(messageLine)) if err != nil { fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error()) @@ -115,7 +125,7 @@ func buildValue(bp client.BatchPoints, pt client.Point) (string, error) { case float64: retv = FloatToString(float64(p)) default: - return retv, fmt.Errorf("undeterminable type for telegraf") + return retv, fmt.Errorf("unexpected type %T with value %v for OpenTSDB", v, v) } return retv, nil } diff --git a/outputs/opentsdb/opentsdb_test.go b/outputs/opentsdb/opentsdb_test.go index 2db064243..e73b1ae2b 100644 --- a/outputs/opentsdb/opentsdb_test.go +++ b/outputs/opentsdb/opentsdb_test.go @@ -3,7 +3,9 @@ package opentsdb import ( "reflect" "testing" + "time" + "github.com/influxdb/influxdb/client" "github.com/influxdb/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -60,4 +62,34 @@ func 
TestWrite(t *testing.T) { // Verify that we can successfully write data to OpenTSDB err = o.Write(testutil.MockBatchPoints()) require.NoError(t, err) + + // Verify postive and negative test cases of writing data + var bp client.BatchPoints + bp.Time = time.Now() + bp.Tags = map[string]string{"testkey": "testvalue"} + bp.Points = []client.Point{ + { + Measurement: "justametric.float", + Fields: map[string]interface{}{"value": float64(1.0)}, + }, + { + Measurement: "justametric.int", + Fields: map[string]interface{}{"value": int64(123456789)}, + }, + { + Measurement: "justametric.uint", + Fields: map[string]interface{}{"value": uint64(123456789012345)}, + }, + { + Measurement: "justametric.string", + Fields: map[string]interface{}{"value": "Lorem Ipsum"}, + }, + { + Measurement: "justametric.anotherfloat", + Fields: map[string]interface{}{"value": float64(42.0)}, + }, + } + err = o.Write(bp) + require.NoError(t, err) + } From 50fcb3914da6df81b1962221065487c83cba64a6 Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 04:30:38 -0400 Subject: [PATCH 066/125] Generating metric information dynamically. 
Makes compatible with postgresql versions < 9.2 --- plugins/postgresql/postgresql.go | 109 +++++++++++++------------- plugins/postgresql/postgresql_test.go | 31 ++++++++ 2 files changed, 85 insertions(+), 55 deletions(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 1a467fee9..a7a8d1acd 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -1,7 +1,10 @@ package postgresql import ( + "bytes" "database/sql" + "fmt" + "strings" "github.com/influxdb/telegraf/plugins" @@ -9,8 +12,9 @@ import ( ) type Server struct { - Address string - Databases []string + Address string + Databases []string + OrderedColumns []string } type Postgresql struct { @@ -51,6 +55,7 @@ func (p *Postgresql) Description() string { } var localhost = &Server{Address: "sslmode=disable"} +var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} func (p *Postgresql) Gather(acc plugins.Accumulator) error { if len(p.Servers) == 0 { @@ -69,6 +74,8 @@ func (p *Postgresql) Gather(acc plugins.Accumulator) error { } func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { + var query string + if serv.Address == "" || serv.Address == "localhost" { serv = localhost } @@ -81,77 +88,69 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { defer db.Close() if len(serv.Databases) == 0 { - rows, err := db.Query(`SELECT * FROM pg_stat_database`) + query = `SELECT * FROM pg_stat_database` + } else { + query = fmt.Sprintf(`SELECT * FROM pg_stat_database WHERE datname IN ('%s')`, strings.Join(serv.Databases, "','")) + } + + rows, err := db.Query(query) + if err != nil { + return err + } + + defer rows.Close() + + serv.OrderedColumns, err = rows.Columns() + if err != nil { + return err + } + + for rows.Next() { + err := p.accRow(rows, acc, serv) if err != nil { return err } - - defer rows.Close() - - for rows.Next() { - err := p.accRow(rows, acc, serv.Address) - 
if err != nil { - return err - } - } - - return rows.Err() - } else { - for _, name := range serv.Databases { - row := db.QueryRow(`SELECT * FROM pg_stat_database WHERE datname=$1`, name) - - err := p.accRow(row, acc, serv.Address) - if err != nil { - return err - } - } } - return nil + return rows.Err() } type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, server string) error { - var ignore interface{} - var name string - var commit, rollback, read, hit int64 - var returned, fetched, inserted, updated, deleted int64 - var conflicts, temp_files, temp_bytes, deadlocks int64 - var read_time, write_time float64 +func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) error { + var columnVars []interface{} + var dbname bytes.Buffer - err := row.Scan(&ignore, &name, &ignore, - &commit, &rollback, - &read, &hit, - &returned, &fetched, &inserted, &updated, &deleted, - &conflicts, &temp_files, &temp_bytes, - &deadlocks, &read_time, &write_time, - &ignore, - ) + columnMap := make(map[string]*interface{}) + + for _, column := range serv.OrderedColumns { + columnMap[column] = new(interface{}) + } + + for i := 0; i < len(columnMap); i++ { + columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]]) + } + + err := row.Scan(columnVars...) 
if err != nil { return err } - tags := map[string]string{"server": server, "db": name} + dbnameChars := (*columnMap["datname"]).([]uint8) + for i := 0; i < len(dbnameChars); i++ { + dbname.WriteString(string(dbnameChars[i])) + } - acc.Add("xact_commit", commit, tags) - acc.Add("xact_rollback", rollback, tags) - acc.Add("blks_read", read, tags) - acc.Add("blks_hit", hit, tags) - acc.Add("tup_returned", returned, tags) - acc.Add("tup_fetched", fetched, tags) - acc.Add("tup_inserted", inserted, tags) - acc.Add("tup_updated", updated, tags) - acc.Add("tup_deleted", deleted, tags) - acc.Add("conflicts", conflicts, tags) - acc.Add("temp_files", temp_files, tags) - acc.Add("temp_bytes", temp_bytes, tags) - acc.Add("deadlocks", deadlocks, tags) - acc.Add("blk_read_time", read_time, tags) - acc.Add("blk_write_time", read_time, tags) + tags := map[string]string{"server": serv.Address, "db": dbname.String()} + + for col, val := range columnMap { + if !ignoredColumns[col] { + acc.Add(col, *val, tags) + } + } return nil } diff --git a/plugins/postgresql/postgresql_test.go b/plugins/postgresql/postgresql_test.go index 363d289f9..7910425f5 100644 --- a/plugins/postgresql/postgresql_test.go +++ b/plugins/postgresql/postgresql_test.go @@ -117,3 +117,34 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { assert.True(t, found) } + +func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { + // if testing.Short() { + // t.Skip("Skipping integration test in short mode") + // } + + p := &Postgresql{ + Servers: []*Server{ + { + Address: fmt.Sprintf("host=%s user=postgres sslmode=disable", + testutil.GetLocalHost()), + }, + }, + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + var found bool + + for _, pnt := range acc.Points { + if pnt.Measurement == "datname" || pnt.Measurement == "datid" || pnt.Measurement == "stats_reset" { + found = true + break + } + } + + assert.False(t, found) +} From 2217fb8c58d0fdd0b5199af1006a443fee44dfea Mon 
Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 04:34:54 -0400 Subject: [PATCH 067/125] uncomment to skip test in short mode --- plugins/postgresql/postgresql_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/postgresql/postgresql_test.go b/plugins/postgresql/postgresql_test.go index 7910425f5..f05fddb68 100644 --- a/plugins/postgresql/postgresql_test.go +++ b/plugins/postgresql/postgresql_test.go @@ -119,9 +119,9 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { } func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { - // if testing.Short() { - // t.Skip("Skipping integration test in short mode") - // } + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } p := &Postgresql{ Servers: []*Server{ From 19c65729264e8b3fa3b6d0dbdb22bf8df2020a0c Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 04:40:32 -0400 Subject: [PATCH 068/125] Add a few notes about the connection strings --- plugins/postgresql/postgresql.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index a7a8d1acd..007ac20bf 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -26,13 +26,17 @@ var sampleConfig = ` [[postgresql.servers]] # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost?sslmode=[disable|verify-ca|verify-full] + # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: - # host=localhost user=pqotest password=... sslmode=... + # host=localhost user=pqotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. By default, the host is localhost # and the user is the currently running user. For localhost, we default # to sslmode=disable as well. 
+ # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. # address = "sslmode=disable" From 76041e84e80b9efc08266b60b342ccf854f46979 Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 04:43:08 -0400 Subject: [PATCH 069/125] fix some more indentation... --- plugins/postgresql/postgresql.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 007ac20bf..a4ffc6988 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -33,10 +33,11 @@ var sampleConfig = ` # All connection parameters are optional. By default, the host is localhost # and the user is the currently running user. For localhost, we default # to sslmode=disable as well. - # Without the dbname parameter, the driver will default to a database - # with the same name as the user. This dbname is just for instantiating a - # connection with the server and doesn't restrict the databases we are trying - # to grab metrics for. + # + # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. 
# address = "sslmode=disable" From 3fa3b2d836ff3fa84732ed95157317228fbb9be1 Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 12:51:50 -0400 Subject: [PATCH 070/125] add some comments --- plugins/postgresql/postgresql.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index a4ffc6988..5310f7bd6 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -105,13 +105,14 @@ func (p *Postgresql) gatherServer(serv *Server, acc plugins.Accumulator) error { defer rows.Close() + // grab the column information from the result serv.OrderedColumns, err = rows.Columns() if err != nil { return err } for rows.Next() { - err := p.accRow(rows, acc, serv) + err = p.accRow(rows, acc, serv) if err != nil { return err } @@ -128,22 +129,26 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) var columnVars []interface{} var dbname bytes.Buffer + // this is where we'll store the column name with its *interface{} columnMap := make(map[string]*interface{}) for _, column := range serv.OrderedColumns { columnMap[column] = new(interface{}) } + // populate the array of interface{} with the pointers in the right order for i := 0; i < len(columnMap); i++ { columnVars = append(columnVars, columnMap[serv.OrderedColumns[i]]) } + // deconstruct array of variables and send to Scan err := row.Scan(columnVars...) 
if err != nil { return err } + // extract the database name from the column map dbnameChars := (*columnMap["datname"]).([]uint8) for i := 0; i < len(dbnameChars); i++ { dbname.WriteString(string(dbnameChars[i])) From fa5753c579ba1edc0b82136dab4032c75c0c734b Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 20:11:49 -0400 Subject: [PATCH 071/125] Makes the test also work across pg versions --- plugins/postgresql/postgresql.go | 10 ++++++++-- plugins/postgresql/postgresql_test.go | 23 +++++++++++++++++++++-- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/plugins/postgresql/postgresql.go b/plugins/postgresql/postgresql.go index 5310f7bd6..e72ada85f 100644 --- a/plugins/postgresql/postgresql.go +++ b/plugins/postgresql/postgresql.go @@ -21,6 +21,8 @@ type Postgresql struct { Servers []*Server } +var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} + var sampleConfig = ` # specify servers via an array of tables [[postgresql.servers]] @@ -59,8 +61,11 @@ func (p *Postgresql) Description() string { return "Read metrics from one or many postgresql servers" } +func (p *Postgresql) IgnoredColumns() map[string]bool { + return ignoredColumns +} + var localhost = &Server{Address: "sslmode=disable"} -var ignoredColumns = map[string]bool{"datid": true, "datname": true, "stats_reset": true} func (p *Postgresql) Gather(acc plugins.Accumulator) error { if len(p.Servers) == 0 { @@ -157,7 +162,8 @@ func (p *Postgresql) accRow(row scanner, acc plugins.Accumulator, serv *Server) tags := map[string]string{"server": serv.Address, "db": dbname.String()} for col, val := range columnMap { - if !ignoredColumns[col] { + _, ignore := ignoredColumns[col] + if !ignore { acc.Add(col, *val, tags) } } diff --git a/plugins/postgresql/postgresql_test.go b/plugins/postgresql/postgresql_test.go index f05fddb68..4c44addce 100644 --- a/plugins/postgresql/postgresql_test.go +++ b/plugins/postgresql/postgresql_test.go @@ -29,6 +29,11 @@ 
func TestPostgresqlGeneratesMetrics(t *testing.T) { err := p.Gather(&acc) require.NoError(t, err) + availableColumns := make(map[string]bool) + for _, col := range p.Servers[0].OrderedColumns { + availableColumns[col] = true + } + intMetrics := []string{ "xact_commit", "xact_rollback", @@ -43,6 +48,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "temp_files", "temp_bytes", "deadlocks", + "numbackends", } floatMetrics := []string{ @@ -50,13 +56,26 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "blk_write_time", } + metricsCounted := 0 + for _, metric := range intMetrics { - assert.True(t, acc.HasIntValue(metric)) + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasIntValue(metric)) + metricsCounted++ + } } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatValue(metric)) + _, ok := availableColumns[metric] + if ok { + assert.True(t, acc.HasFloatValue(metric)) + metricsCounted++ + } } + + assert.True(t, metricsCounted > 0) + assert.Equal(t, len(availableColumns) - len(p.IgnoredColumns()), metricsCounted) } func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { From d926a3b5daddb097b390dd080cd5d47bddedb1c9 Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Sun, 13 Sep 2015 20:22:19 -0400 Subject: [PATCH 072/125] no longer duplicate ignored columns here --- plugins/postgresql/postgresql_test.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/plugins/postgresql/postgresql_test.go b/plugins/postgresql/postgresql_test.go index 4c44addce..e9ff99e4e 100644 --- a/plugins/postgresql/postgresql_test.go +++ b/plugins/postgresql/postgresql_test.go @@ -156,14 +156,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { err := p.Gather(&acc) require.NoError(t, err) - var found bool - - for _, pnt := range acc.Points { - if pnt.Measurement == "datname" || pnt.Measurement == "datid" || pnt.Measurement == "stats_reset" { - found = true - break - } + for col := range p.IgnoredColumns() { + 
assert.False(t, acc.HasMeasurement(col)) } - - assert.False(t, found) } From aac9ba6c1eaa1497dfaf2a17abf34632adf408a2 Mon Sep 17 00:00:00 2001 From: Kevin Bouwkamp Date: Mon, 14 Sep 2015 20:50:07 -0400 Subject: [PATCH 073/125] add bugfix in CHANGELOG and some notes in pg README Closes #192 --- CHANGELOG.md | 1 + plugins/postgresql/README.md | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 plugins/postgresql/README.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 3519407dd..3e70cd98e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ will still be backwards compatible if only `url` is specified. - Fix net plugin on darwin - [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! - [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! +- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ ## v0.1.8 [2015-09-04] diff --git a/plugins/postgresql/README.md b/plugins/postgresql/README.md new file mode 100644 index 000000000..ce0ae18d6 --- /dev/null +++ b/plugins/postgresql/README.md @@ -0,0 +1,30 @@ +# PostgreSQL plugin + +This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ view. The metrics recorded depend on your version of postgres. 
See table: +``` +pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) +--- --- --- ------- ------- ------- +datid* x x x x +datname* x x x x +numbackends x x x x x +xact_commit x x x x x +xact_rollback x x x x x +blks_read x x x x x +blks_hit x x x x x +tup_returned x x x +tup_fetched x x x +tup_inserted x x x +tup_updated x x x +tup_deleted x x x +conflicts x x +temp_files x +temp_bytes x +deadlocks x +blk_read_time x +blk_write_time x +stats_reset* x x +``` + +_* value ignored and therefore not recorded._ + +More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) From 4d6f11b61feeb7bb50e12d23cabbe973b4752d9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Menassa?= Date: Tue, 15 Sep 2015 12:58:51 +0200 Subject: [PATCH 074/125] [fix] mem_used_perc returns percentage of used mem --- plugins/system/ps/mem/mem_darwin.go | 2 +- plugins/system/ps/mem/mem_freebsd.go | 2 +- plugins/system/ps/mem/mem_linux.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/system/ps/mem/mem_darwin.go b/plugins/system/ps/mem/mem_darwin.go index 43da44d1d..d52046e5e 100644 --- a/plugins/system/ps/mem/mem_darwin.go +++ b/plugins/system/ps/mem/mem_darwin.go @@ -61,7 +61,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { ret.Available = ret.Free + ret.Buffers + ret.Cached ret.Used = ret.Total - ret.Free - ret.UsedPercent = float64(ret.Total-ret.Available) / float64(ret.Total) * 100.0 + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 return ret, nil } diff --git a/plugins/system/ps/mem/mem_freebsd.go b/plugins/system/ps/mem/mem_freebsd.go index aa83a246d..4ceca9997 100644 --- a/plugins/system/ps/mem/mem_freebsd.go +++ b/plugins/system/ps/mem/mem_freebsd.go @@ -81,7 +81,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { ret.Available = ret.Free + ret.Buffers + ret.Cached ret.Used = ret.Total - 
ret.Free - ret.UsedPercent = float64(ret.Total-ret.Available) / float64(ret.Total) * 100.0 + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 return ret, nil } diff --git a/plugins/system/ps/mem/mem_linux.go b/plugins/system/ps/mem/mem_linux.go index 42a49a2b6..8322fe3f4 100644 --- a/plugins/system/ps/mem/mem_linux.go +++ b/plugins/system/ps/mem/mem_linux.go @@ -45,7 +45,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { } ret.Available = ret.Free + ret.Buffers + ret.Cached ret.Used = ret.Total - ret.Free - ret.UsedPercent = float64(ret.Total-ret.Available) / float64(ret.Total) * 100.0 + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 return ret, nil } @@ -63,7 +63,7 @@ func SwapMemory() (*SwapMemoryStat, error) { ret.Used = ret.Total - ret.Free //check Infinity if ret.Total != 0 { - ret.UsedPercent = float64(ret.Total-ret.Free) / float64(ret.Total) * 100.0 + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 } else { ret.UsedPercent = 0 } From f00d43aa0965969277ffe1f40730d2e27b67f563 Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Tue, 15 Sep 2015 21:16:53 +0300 Subject: [PATCH 075/125] Added amqp output --- Godeps/Godeps.json | 4 + .../src/github.com/streadway/amqp/.gitignore | 3 + .../src/github.com/streadway/amqp/.travis.yml | 14 + .../src/github.com/streadway/amqp/LICENSE | 23 + .../src/github.com/streadway/amqp/README.md | 81 + .../github.com/streadway/amqp/allocator.go | 106 + .../streadway/amqp/allocator_test.go | 90 + .../src/github.com/streadway/amqp/auth.go | 44 + .../src/github.com/streadway/amqp/certs.sh | 159 + .../src/github.com/streadway/amqp/channel.go | 1548 ++++++++ .../github.com/streadway/amqp/client_test.go | 559 +++ .../src/github.com/streadway/amqp/confirms.go | 93 + .../streadway/amqp/confirms_test.go | 119 + .../github.com/streadway/amqp/connection.go | 769 ++++ .../github.com/streadway/amqp/consumers.go | 118 + .../src/github.com/streadway/amqp/delivery.go | 173 + 
.../streadway/amqp/delivery_test.go | 33 + .../src/github.com/streadway/amqp/doc.go | 108 + .../streadway/amqp/examples_test.go | 393 ++ .../src/github.com/streadway/amqp/fuzz.go | 16 + .../src/github.com/streadway/amqp/gen.sh | 2 + .../streadway/amqp/integration_test.go | 1796 +++++++++ .../src/github.com/streadway/amqp/read.go | 447 +++ .../github.com/streadway/amqp/read_test.go | 22 + .../streadway/amqp/reconnect_test.go | 113 + .../src/github.com/streadway/amqp/return.go | 64 + .../github.com/streadway/amqp/shared_test.go | 71 + .../amqp/spec/amqp0-9-1.stripped.extended.xml | 537 +++ .../src/github.com/streadway/amqp/spec/gen.go | 536 +++ .../src/github.com/streadway/amqp/spec091.go | 3306 +++++++++++++++++ .../src/github.com/streadway/amqp/tls_test.go | 218 ++ .../src/github.com/streadway/amqp/types.go | 390 ++ .../src/github.com/streadway/amqp/uri.go | 170 + .../src/github.com/streadway/amqp/uri_test.go | 328 ++ .../src/github.com/streadway/amqp/write.go | 411 ++ outputs/all/all.go | 1 + outputs/amqp/amqp.go | 112 + outputs/amqp/amqp_test.go | 28 + 38 files changed, 13005 insertions(+) create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/.gitignore create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/LICENSE create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/README.md create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/allocator.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/allocator_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/auth.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/certs.sh create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/channel.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/client_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/confirms.go create mode 100644 
Godeps/_workspace/src/github.com/streadway/amqp/confirms_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/connection.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/consumers.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/delivery.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/delivery_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/doc.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/examples_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/fuzz.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/gen.sh create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/integration_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/read.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/read_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/reconnect_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/return.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/shared_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/spec/gen.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/spec091.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/tls_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/types.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/uri.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/uri_test.go create mode 100644 Godeps/_workspace/src/github.com/streadway/amqp/write.go create mode 100644 outputs/amqp/amqp.go create mode 100644 outputs/amqp/amqp_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 8ffab7faa..f6a196913 100644 
--- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -166,6 +166,10 @@ "ImportPath": "github.com/samuel/go-zookeeper/zk", "Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4" }, + { + "ImportPath": "github.com/streadway/amqp", + "Rev": "f4879ba28fffbb576743b03622a9ff20461826b2" + }, { "ImportPath": "github.com/stretchr/objx", "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/.gitignore b/Godeps/_workspace/src/github.com/streadway/amqp/.gitignore new file mode 100644 index 000000000..58b0e8f32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/.gitignore @@ -0,0 +1,3 @@ +spec/spec +examples/simple-consumer/simple-consumer +examples/simple-producer/simple-producer diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/.travis.yml b/Godeps/_workspace/src/github.com/streadway/amqp/.travis.yml new file mode 100644 index 000000000..f1c275a2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.1 + - 1.4 + - 1.5 + +services: + - rabbitmq + +env: + - AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ GOMAXPROCS=2 + +script: go test -v -tags integration ./... diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/LICENSE b/Godeps/_workspace/src/github.com/streadway/amqp/LICENSE new file mode 100644 index 000000000..243c0ce7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. 
+ +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/README.md b/Godeps/_workspace/src/github.com/streadway/amqp/README.md new file mode 100644 index 000000000..c4291fb68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/README.md @@ -0,0 +1,81 @@ +# AMQP + +AMQP 0.9.1 client with RabbitMQ extensions in Go. + +# Status + +*Beta* + +[![Build Status](https://secure.travis-ci.org/streadway/amqp.png)](http://travis-ci.org/streadway/amqp) + +API changes unlikely and will be discussed on [Github +issues](https://github.com/streadway/amqp/issues) along with any bugs or +enhancements. + +# Goals + +Provide an functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. This includes the minimum necessary to +interact the semantics of the protocol. + +# Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. 
+ * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this pacakge. + +# Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +# Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. + +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +# Contributing + +Pull requests are very much welcomed. Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. 
+ +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). + +# External packages + + * Google App Engine Dialer support: [https://github.com/soundtrackyourbrand/gaeamqp](https://github.com/soundtrackyourbrand/gaeamqp) + * RabbitMQ examples in Go: [https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +# License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/allocator.go b/Godeps/_workspace/src/github.com/streadway/amqp/allocator.go new file mode 100644 index 000000000..928418826 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. 
+// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceeding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// succesfully claimed. 
+func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/allocator_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/allocator_test.go new file mode 100644 index 000000000..2d6fd5dba --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/allocator_test.go @@ -0,0 +1,90 @@ +package amqp + +import ( + "math/rand" + "testing" +) + +func TestAllocatorFirstShouldBeTheLow(t *testing.T) { + n, ok := newAllocator(1, 2).next() + if !ok { + t.Fatalf("expected to allocate between 1 and 2") + } + + if want, got := 1, n; want != got { + t.Fatalf("expected to first allocation to be 1") + } +} + +func TestAllocatorShouldBeBoundByHigh(t *testing.T) { + a := newAllocator(1, 2) + + if n, ok := a.next(); n != 1 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if n, ok := a.next(); n != 2 || !ok { + t.Fatalf("expected to allocate between 1 and 2, got %d, %v", n, ok) + } + if _, ok := a.next(); ok { + t.Fatalf("expected not to allocate outside of 1 and 2") + } +} + +func TestAllocatorStringShouldIncludeAllocatedRanges(t *testing.T) { + a := newAllocator(1, 10) + a.reserve(1) + a.reserve(2) + a.reserve(3) + a.reserve(5) + a.reserve(6) + a.reserve(8) + a.reserve(10) + + if want, got := "allocator[1..10] 1..3 5..6 8 10", a.String(); want != got { + t.Fatalf("expected String of %q, got %q", want, got) + } +} + +func TestAllocatorShouldReuseReleased(t *testing.T) { + a := newAllocator(1, 2) + + first, _ := a.next() + if want, got := 1, first; want != got { + t.Fatalf("expected allocation to be %d, 
got: %d", want, got) + } + + second, _ := a.next() + if want, got := 2, second; want != got { + t.Fatalf("expected allocation to be %d, got: %d", want, got) + } + + a.release(first) + + third, _ := a.next() + if want, got := first, third; want != got { + t.Fatalf("expected third allocation to be %d, got: %d", want, got) + } + + _, ok := a.next() + if want, got := false, ok; want != got { + t.Fatalf("expected fourth allocation to saturate the pool") + } +} + +func TestAllocatorReleasesKeepUpWithAllocationsForAllSizes(t *testing.T) { + const runs = 5 + const max = 13 + + for lim := 1; lim < 2<= lim { // fills the allocator + a.release(int(rand.Int63n(int64(lim)))) + } + if _, ok := a.next(); !ok { + t.Fatalf("expected %d runs of random release of size %d not to fail on allocation %d", runs, lim, i) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/auth.go b/Godeps/_workspace/src/github.com/streadway/amqp/auth.go new file mode 100644 index 000000000..bff7d7948 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/auth.go @@ -0,0 +1,44 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. +type PlainAuth struct { + Username string + Password string +} + +func (me *PlainAuth) Mechanism() string { + return "PLAIN" +} + +func (me *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", me.Username, me.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. 
+func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/certs.sh b/Godeps/_workspace/src/github.com/streadway/amqp/certs.sh new file mode 100644 index 000000000..834f42242 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions 
server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/channel.go b/Godeps/_workspace/src/github.com/streadway/amqp/channel.go new file mode 100644 index 000000000..9cf93b4d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/channel.go @@ -0,0 +1,1548 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. + +*/ +type Channel struct { + destructor sync.Once + sendM sync.Mutex // sequence channel frames + m sync.Mutex // struct field mutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. + closes []chan *Error + + // Listeners for active=true flow control. 
When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // State that manages the send behavior after before and after shutdown, must + // only be mutated in shutdown() + send func(*Channel, message) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + send: (*Channel).sendOpen, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. 
+func (me *Channel) shutdown(e *Error) { + me.destructor.Do(func() { + me.m.Lock() + defer me.m.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range me.closes { + c <- e + } + } + + me.send = (*Channel).sendClosed + + // Notify RPC if we're selecting + if e != nil { + me.errors <- e + } + + me.consumers.closeAll() + + for _, c := range me.closes { + close(c) + } + + for _, c := range me.flows { + close(c) + } + + for _, c := range me.returns { + close(c) + } + + for _, c := range me.cancels { + close(c) + } + + if me.confirms != nil { + me.confirms.Close() + } + + me.noNotify = true + }) +} + +func (me *Channel) open() error { + return me.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. +func (me *Channel) call(req message, res ...message) error { + if err := me.send(me, req); err != nil { + return err + } + + if req.wait() { + select { + case e := <-me.errors: + return e + + case msg := <-me.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } else { + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. 
+ return ErrClosed + } + } + } + + return nil +} + +func (me *Channel) sendClosed(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (me *Channel) sendOpen(msg message) (err error) { + me.sendM.Lock() + defer me.sendM.Unlock() + + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + size := me.connection.Config.FrameSize - frameHeaderSize + + if err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: content, + }); err != nil { + return + } + + if err = me.connection.send(&headerFrame{ + ChannelId: me.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = me.connection.send(&bodyFrame{ + ChannelId: me.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = me.connection.send(&methodFrame{ + ChannelId: me.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. 
+func (me *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + me.connection.closeChannel(me, newError(m.ReplyCode, m.ReplyText)) + me.send(me, &channelCloseOk{}) + + case *channelFlow: + for _, c := range me.flows { + c <- m.Active + } + me.send(me, &channelFlowOk{Active: m.Active}) + + case *basicCancel: + for _, c := range me.cancels { + c <- m.ConsumerTag + } + me.send(me, &basicCancelOk{ConsumerTag: m.ConsumerTag}) + + case *basicReturn: + ret := newReturn(*m) + for _, c := range me.returns { + c <- *ret + } + + case *basicAck: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if me.confirming { + if m.Multiple { + me.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + me.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + me.consumers.send(m.ConsumerTag, newDelivery(me, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + me.rpc <- msg + } +} + +func (me *Channel) transition(f func(*Channel, frame) error) error { + me.recv = f + return nil +} + +func (me *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case *methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + me.body = make([]byte, 0) + me.message = msg + return me.transition((*Channel).recvHeader) + } + + me.dispatch(frame.Method) // termination state + return me.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return me.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return me.transition((*Channel).recvMethod) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +func (me *Channel) recvHeader(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and 
handle method + return me.recvMethod(f) + + case *headerFrame: + // start collecting if we expect body frames + me.header = frame + + if frame.Size == 0 { + me.message.setContent(me.header.Properties, me.body) + me.dispatch(me.message) // termination state + return me.transition((*Channel).recvMethod) + } else { + return me.transition((*Channel).recvContent) + } + + case *bodyFrame: + // drop and reset + return me.transition((*Channel).recvMethod) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +// state after method + header and before the length +// defined by the header has been reached +func (me *Channel) recvContent(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return me.recvMethod(f) + + case *headerFrame: + // drop and reset + return me.transition((*Channel).recvMethod) + + case *bodyFrame: + me.body = append(me.body, frame.Body...) + + if uint64(len(me.body)) >= me.header.Size { + me.message.setContent(me.header.Properties, me.body) + me.dispatch(me.message) // termination state + return me.transition((*Channel).recvMethod) + } + + return me.transition((*Channel).recvContent) + + default: + panic("unexpected frame type") + } + + panic("unreachable") +} + +/* +Close initiate a clean channel closure by sending a close message with the error +code set to '200'. + +It is safe to call this method multiple times. + +*/ +func (me *Channel) Close() error { + defer me.connection.closeChannel(me, nil) + return me.call( + &channelClose{ReplyCode: replySuccess}, + &channelCloseOk{}, + ) +} + +/* +NotifyClose registers a listener for when the server sends a channel or +connection exception in the form of a Connection.Close or Channel.Close method. +Connection exceptions will be broadcast to all open channels and all channels +will be closed, where channel exceptions will only be broadcast to listeners to +this channel. 
+ +The chan provided will be closed when the Channel is closed and on a +graceful close, no error will be sent. + +*/ +func (me *Channel) NotifyClose(c chan *Error) chan *Error { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.closes = append(me.closes, c) + } + + return c +} + +/* +NotifyFlow registers a listener for basic.flow methods sent by the server. +When `true` is sent on one of the listener channels, all publishers should +pause until a `false` is sent. + +The server may ask the producer to pause or restart the flow of Publishings +sent by on a channel. This is a simple flow-control mechanism that a server can +use to avoid overflowing its queues or otherwise finding itself receiving more +messages than it can process. Note that this method is not intended for window +control. It does not affect contents returned by basic.get-ok methods. + +When a new channel is opened, it is active (flow is active). Some +applications assume that channels are inactive until started. To emulate +this behavior a client MAY open the channel, then pause it. + +Publishers should respond to a flow messages as rapidly as possible and the +server may disconnect over producing channels that do not respect these +messages. + +basic.flow-ok methods will always be returned to the server regardless of +the number of listeners there are. + +To control the flow of deliveries from the server. Use the Channel.Flow() +method instead. + +Note: RabbitMQ will rather use TCP pushback on the network connection instead +of sending basic.flow. This means that if a single channel is producing too +much on the same connection, all channels using that connection will suffer, +including acknowledgments from deliveries. Use different Connections if you +desire to interleave consumers and producers in the same process to avoid your +basic.ack messages from getting rate limited with your basic.publish messages. 
+ +*/ +func (me *Channel) NotifyFlow(c chan bool) chan bool { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.flows = append(me.flows, c) + } + + return c +} + +/* +NotifyReturn registers a listener for basic.return methods. These can be sent +from the server when a publish is undeliverable either from the mandatory or +immediate flags. + +A return struct has a copy of the Publishing along with some error +information about why the publishing failed. + +*/ +func (me *Channel) NotifyReturn(c chan Return) chan Return { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.returns = append(me.returns, c) + } + + return c +} + +/* +NotifyCancel registers a listener for basic.cancel methods. These can be sent +from the server when a queue is deleted or when consuming from a mirrored queue +where the master has just failed (and was moved to another node) + +The subscription tag is returned to the listener. + +*/ +func (me *Channel) NotifyCancel(c chan string) chan string { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.cancels = append(me.cancels, c) + } + + return c +} + +/* +NotifyConfirm calls NotifyPublish and starts a goroutines sending +ordered Ack and Nack DeliveryTag to the respective channels. + +For strict ordering, use NotifyPublish instead. +*/ +func (me *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) { + confirms := me.NotifyPublish(make(chan Confirmation, len(ack)+len(nack))) + + go func() { + for c := range confirms { + if c.Ack { + ack <- c.DeliveryTag + } else { + nack <- c.DeliveryTag + } + } + close(ack) + if nack != ack { + close(nack) + } + }() + + return ack, nack +} + +/* +NotifyPublish registers a listener for reliable publishing. Receives from this +chan for every publish after Channel.Confirm will be in order starting with +DeliveryTag 1. 
+
+There will be one and only one Confirmation Publishing starting with the
+delivery tag of 1 and progressing sequentially until the total number of
+Publishings have been seen by the server.
+
+Acknowledgments will be received in the order of delivery from the
+NotifyPublish channels even if the server acknowledges them out of order.
+
+The listener chan will be closed when the Channel is closed.
+
+The capacity of the chan Confirmation must be at least as large as the
+number of outstanding publishings. Not having enough buffered chans will
+create a deadlock if you attempt to perform other operations on the Connection
+or Channel while confirms are in-flight.
+
+It's advisable to wait for all Confirmations to arrive before calling
+Channel.Close() or Connection.Close().
+
+*/
+func (me *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation {
+	me.m.Lock()
+	defer me.m.Unlock()
+
+	if me.noNotify {
+		close(confirm)
+	} else {
+		me.confirms.Listen(confirm)
+	}
+
+	return confirm
+
+}
+
+/*
+Qos controls how many messages or how many bytes the server will try to keep on
+the network for consumers before receiving delivery acks. The intent of Qos is
+to make sure the network buffers stay full between the server and client.
+
+With a prefetch count greater than zero, the server will deliver that many
+messages to consumers before acknowledgments are received. The server ignores
+this option when consumers are started with noAck because no acknowledgments
+are expected or sent.
+
+With a prefetch size greater than zero, the server will try to keep at least
+that many bytes of deliveries flushed to the network before receiving
+acknowledgments from the consumers. This option is ignored when consumers are
+started with noAck.
+
+When global is true, these Qos settings apply to all existing and future
+consumers on all channels on the same connection. When false, the Channel.Qos
+settings will apply to all existing and future consumers on this channel.
+RabbitMQ does not implement the global flag. + +To get round-robin behavior between consumers consuming from the same queue on +different connections, set the prefetch count to 1, and the next available +message on the server will be delivered to the next available consumer. + +If your consumer work time is reasonably consistent and not much greater +than two times your network round trip time, you will see significant +throughput improvements starting with a prefetch count of 2 or slightly +greater as described by benchmarks on RabbitMQ. + +http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/ +*/ +func (me *Channel) Qos(prefetchCount, prefetchSize int, global bool) error { + return me.call( + &basicQos{ + PrefetchCount: uint16(prefetchCount), + PrefetchSize: uint32(prefetchSize), + Global: global, + }, + &basicQosOk{}, + ) +} + +/* +Cancel stops deliveries to the consumer chan established in Channel.Consume and +identified by consumer. + +Only use this method to cleanly stop receiving deliveries from the server and +cleanly shut down the consumer chan identified by this tag. Using this method +and waiting for remaining messages to flush from the consumer chan will ensure +all messages received on the network will be delivered to the receiver of your +consumer chan. + +Continue consuming from the chan Delivery provided by Channel.Consume until the +chan closes. + +When noWait is true, do not wait for the server to acknowledge the cancel. +Only use this when you are certain there are no deliveries requiring +acknowledgment are in-flight otherwise they will arrive and be dropped in the +client without an ack and will not be redelivered to other consumers. 
+
+*/
+func (me *Channel) Cancel(consumer string, noWait bool) error {
+	req := &basicCancel{
+		ConsumerTag: consumer,
+		NoWait:      noWait,
+	}
+	res := &basicCancelOk{}
+
+	if err := me.call(req, res); err != nil {
+		return err
+	}
+
+	if req.wait() {
+		me.consumers.close(res.ConsumerTag)
+	} else {
+		// Potentially could drop deliveries in flight
+		me.consumers.close(consumer)
+	}
+
+	return nil
+}
+
+/*
+QueueDeclare declares a queue to hold messages and deliver to consumers.
+Declaring creates a queue if it doesn't already exist, or ensures that an
+existing queue matches the same parameters.
+
+Every queue declared gets a default binding to the empty exchange "" which has
+the type "direct" with the routing key matching the queue's name. With this
+default binding, it is possible to publish messages that route directly to
+this queue by publishing to "" with the routing key of the queue name.
+
+  QueueDeclare("alerts", true, false, false, false, nil)
+  Publish("", "alerts", false, false, Publishing{Body: []byte("...")})
+
+  Delivery       Exchange  Key       Queue
+  -----------------------------------------------
+  key: alerts -> ""     -> alerts -> alerts
+
+The queue name may be empty, in which the server will generate a unique name
+which will be returned in the Name field of Queue struct.
+
+Durable and Non-Auto-Deleted queues will survive server restarts and remain
+when there are no remaining consumers or bindings. Persistent publishings will
+be restored in this queue on server restart. These queues are only able to be
+bound to durable exchanges.
+
+Non-Durable and Auto-Deleted queues will not be redeclared on server restart
+and will be deleted by the server after a short time when the last consumer is
+canceled or the last consumer's channel is closed. Queues with this lifetime
+can also be deleted normally with QueueDelete. These durable queues can only
+be bound to non-durable exchanges.
+ +Non-Durable and Non-Auto-Deleted queues will remain declared as long as the +server is running regardless of how many consumers. This lifetime is useful +for temporary topologies that may have long delays between consumer activity. +These queues can only be bound to non-durable exchanges. + +Durable and Auto-Deleted queues will be restored on server restart, but without +active consumers, will not survive and be removed. This Lifetime is unlikely +to be useful. + +Exclusive queues are only accessible by the connection that declares them and +will be deleted when the connection closes. Channels on other connections +will receive an error when attempting declare, bind, consume, purge or delete a +queue with the same name. + +When noWait is true, the queue will assume to be declared on the server. A +channel exception will arrive if the conditions are met for existing queues +or attempting to modify an existing queue from a different connection. + +When the error return value is not nil, you can assume the queue could not be +declared with these parameters and the channel will be closed. + +*/ +func (me *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := me.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{ + Name: name, + }, nil + + panic("unreachable") +} + +/* + +QueueDeclarePassive is functionally and parametrically equivalent to +QueueDeclare, except that it sets the "passive" attribute to true. 
A passive +queue is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent queue will cause RabbitMQ to throw an exception. This function +can be used to test for the existence of a queue. + +*/ +func (me *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := me.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{ + Name: name, + }, nil + + panic("unreachable") +} + +/* +QueueInspect passively declares a queue by name to inspect the current message +count, consumer count. + +Use this method to check how many unacknowledged messages reside in the queue +and how many consumers are receiving deliveries and whether a queue by this +name already exists. + +If the queue by this name exists, use Channel.QueueDeclare check if it is +declared with specific parameters. + +If a queue by this name does not exist, an error will be returned and the +channel will be closed. + +*/ +func (me *Channel) QueueInspect(name string) (Queue, error) { + req := &queueDeclare{ + Queue: name, + Passive: true, + } + res := &queueDeclareOk{} + + err := me.call(req, res) + + state := Queue{ + Name: name, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + } + + return state, err +} + +/* +QueueBind binds an exchange to a queue so that publishings to the exchange will +be routed to the queue when the publishing routing key matches the binding +routing key. 
+ + QueueBind("pagers", "alert", "log", false, nil) + QueueBind("emails", "info", "log", false, nil) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> log ----> alert --> pagers + key: info ---> log ----> info ---> emails + key: debug --> log (none) (dropped) + +If a binding with the same key and arguments already exists between the +exchange and queue, the attempt to rebind will be ignored and the existing +binding will be retained. + +In the case that multiple bindings may cause the message to be routed to the +same queue, the server will only route the publishing once. This is possible +with topic exchanges. + + QueueBind("pagers", "alert", "amq.topic", false, nil) + QueueBind("emails", "info", "amq.topic", false, nil) + QueueBind("emails", "#", "amq.topic", false, nil) // match everything + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> amq.topic ----> alert --> pagers + key: info ---> amq.topic ----> # ------> emails + \---> info ---/ + key: debug --> amq.topic ----> # ------> emails + +It is only possible to bind a durable queue to a durable exchange regardless of +whether the queue or exchange is auto-deleted. Bindings between durable queues +and exchanges will also be restored on server restart. + +If the binding could not complete, an error will be returned and the channel +will be closed. + +When noWait is true and the queue could not be bound, the channel will be +closed with an error. + +*/ +func (me *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &queueBind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &queueBindOk{}, + ) +} + +/* +QueueUnbind removes a binding between an exchange and queue matching the key and +arguments. 
+ +It is possible to send and empty string for the exchange name which means to +unbind the queue from the default exchange. + +*/ +func (me *Channel) QueueUnbind(name, key, exchange string, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &queueUnbind{ + Queue: name, + Exchange: exchange, + RoutingKey: key, + Arguments: args, + }, + &queueUnbindOk{}, + ) +} + +/* +QueuePurge removes all messages from the named queue which are not waiting to +be acknowledged. Messages that have been delivered but have not yet been +acknowledged will not be removed. + +When successful, returns the number of messages purged. + +If noWait is true, do not wait for the server response and the number of +messages purged will not be meaningful. +*/ +func (me *Channel) QueuePurge(name string, noWait bool) (int, error) { + req := &queuePurge{ + Queue: name, + NoWait: noWait, + } + res := &queuePurgeOk{} + + err := me.call(req, res) + + return int(res.MessageCount), err +} + +/* +QueueDelete removes the queue from the server including all bindings then +purges the messages based on server configuration, returning the number of +messages purged. + +When ifUnused is true, the queue will not be deleted if there are any +consumers on the queue. If there are consumers, an error will be returned and +the channel will be closed. + +When ifEmpty is true, the queue will not be deleted if there are any messages +remaining on the queue. If there are messages, an error will be returned and +the channel will be closed. + +When noWait is true, the queue will be deleted without waiting for a response +from the server. The purged message count will not be meaningful. If the queue +could not be deleted, a channel exception will be raised and the channel will +be closed. 
+
+*/
+func (me *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) {
+	req := &queueDelete{
+		Queue:    name,
+		IfUnused: ifUnused,
+		IfEmpty:  ifEmpty,
+		NoWait:   noWait,
+	}
+	res := &queueDeleteOk{}
+
+	err := me.call(req, res)
+
+	return int(res.MessageCount), err
+}
+
+/*
+Consume immediately starts delivering queued messages.
+
+Begin receiving on the returned chan Delivery before any other operation on the
+Connection or Channel.
+
+Continues deliveries to the returned chan Delivery until Channel.Cancel,
+Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must
+range over the chan to ensure all deliveries are received. Unreceived
+deliveries will block all methods on the same connection.
+
+All deliveries in AMQP must be acknowledged. It is expected of the consumer to
+call Delivery.Ack after it has successfully processed the delivery. If the
+consumer is cancelled or the channel or connection is closed any unacknowledged
+deliveries will be requeued at the end of the same queue.
+
+The consumer is identified by a string that is unique and scoped for all
+consumers on this channel. If you wish to eventually cancel the consumer, use
+the same non-empty identifier in Channel.Cancel. An empty string will cause
+the library to generate a unique identity. The consumer identity will be
+included in every Delivery in the ConsumerTag field
+
+When autoAck (also known as noAck) is true, the server will acknowledge
+deliveries to this consumer prior to writing the delivery to the network. When
+autoAck is true, the consumer should not call Delivery.Ack. Automatically
+acknowledging deliveries means that some deliveries may get lost if the
+consumer is unable to process them after the server delivers them.
+
+When exclusive is true, the server will ensure that this is the sole consumer
+from this queue. When exclusive is false, the server will fairly distribute
+deliveries across multiple consumers.
+ +When noLocal is true, the server will not deliver publishing sent from the same +connection to this consumer. It's advisable to use separate connections for +Channel.Publish and Channel.Consume so not to have TCP pushback on publishing +affect the ability to consume messages, so this parameter is here mostly for +completeness. + +When noWait is true, do not wait for the server to confirm the request and +immediately begin deliveries. If it is not possible to consume, a channel +exception will be raised and the channel will be closed. + +Optional arguments can be provided that have specific semantics for the queue +or server. + +When the channel or connection closes, all delivery chans will also close. + +Deliveries on the returned chan will be buffered indefinitely. To limit memory +of this buffer, use the Channel.Qos method to limit the amount of +unacknowledged/buffered deliveries the server will deliver on this Channel. + +*/ +func (me *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) { + // When we return from me.call, there may be a delivery already for the + // consumer that hasn't been added to the consumer hash yet. Because of + // this, we never rely on the server picking a consumer tag for us. + + if err := args.Validate(); err != nil { + return nil, err + } + + if consumer == "" { + consumer = uniqueConsumerTag() + } + + req := &basicConsume{ + Queue: queue, + ConsumerTag: consumer, + NoLocal: noLocal, + NoAck: autoAck, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &basicConsumeOk{} + + deliveries := make(chan Delivery) + + me.consumers.add(consumer, deliveries) + + if err := me.call(req, res); err != nil { + me.consumers.close(consumer) + return nil, err + } + + return (<-chan Delivery)(deliveries), nil +} + +/* +ExchangeDeclare declares an exchange on the server. If the exchange does not +already exist, the server will create it. 
If the exchange exists, the server +verifies that it is of the provided type, durability and auto-delete flags. + +Errors returned from this method will close the channel. + +Exchange names starting with "amq." are reserved for pre-declared and +standardized exchanges. The client MAY declare an exchange starting with +"amq." if the passive option is set, or the exchange already exists. Names can +consists of a non-empty sequence of letters, digits, hyphen, underscore, +period, or colon. + +Each exchange belongs to one of a set of exchange kinds/types implemented by +the server. The exchange types define the functionality of the exchange - i.e. +how messages are routed through it. Once an exchange is declared, its type +cannot be changed. The common types are "direct", "fanout", "topic" and +"headers". + +Durable and Non-Auto-Deleted exchanges will survive server restarts and remain +declared when there are no remaining bindings. This is the best lifetime for +long-lived exchange configurations like stable routes and default exchanges. + +Non-Durable and Auto-Deleted exchanges will be deleted when there are no +remaining bindings and not restored on server restart. This lifetime is +useful for temporary topologies that should not pollute the virtual host on +failure or after the consumers have completed. + +Non-Durable and Non-Auto-deleted exchanges will remain as long as the server is +running including when there are no remaining bindings. This is useful for +temporary topologies that may have long delays between bindings. + +Durable and Auto-Deleted exchanges will survive server restarts and will be +removed before and after server restarts when there are no remaining bindings. +These exchanges are useful for robust temporary topologies or when you require +binding durable queues to auto-deleted exchanges. + +Note: RabbitMQ declares the default exchange types like 'amq.fanout' as +durable, so queues that bind to these pre-declared exchanges must also be +durable. 
+ +Exchanges declared as `internal` do not accept publishings. Internal +exchanges are useful for when you wish to implement inter-exchange topologies +that should not be exposed to users of the broker. + +When noWait is true, declare without waiting for a confirmation from the server. +The channel may be closed as a result of an error. Add a NotifyClose listener +to respond to any exceptions. + +Optional amqp.Table of arguments that are specific to the server's implementation of +the exchange can be sent for exchange types that require extra parameters. +*/ +func (me *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* + +ExchangeDeclarePassive is functionally and parametrically equivalent to +ExchangeDeclare, except that it sets the "passive" attribute to true. A passive +exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent exchange will cause RabbitMQ to throw an exception. This function +can be used to detect the existence of an exchange. + +*/ +func (me *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* +ExchangeDelete removes the named exchange from the server. When an exchange is +deleted all queue bindings on the exchange are also deleted.
If this exchange +does not exist, the channel will be closed with an error. + +When ifUnused is true, the server will only delete the exchange if it has no queue +bindings. If the exchange has queue bindings the server does not delete it +but close the channel with an exception instead. Set this to true if you are +not the sole owner of the exchange. + +When noWait is true, do not wait for a server confirmation that the exchange has +been deleted. Failing to delete the channel could close the channel. Add a +NotifyClose listener to respond to these channel exceptions. +*/ +func (me *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error { + return me.call( + &exchangeDelete{ + Exchange: name, + IfUnused: ifUnused, + NoWait: noWait, + }, + &exchangeDeleteOk{}, + ) +} + +/* +ExchangeBind binds an exchange to another exchange to create inter-exchange +routing topologies on the server. This can decouple the private topology and +routing exchanges from exchanges intended solely for publishing endpoints. + +Binding two exchanges with identical arguments will not create duplicate +bindings. + +Binding one exchange to another with multiple bindings will only deliver a +message once. For example if you bind your exchange to `amq.fanout` with two +different binding keys, only a single message will be delivered to your +exchange even though multiple bindings will match. + +Given a message delivered to the source exchange, the message will be forwarded +to the destination exchange when the routing key is matched. + + ExchangeBind("sell", "MSFT", "trade", false, nil) + ExchangeBind("buy", "AAPL", "trade", false, nil) + + Delivery Source Key Destination + example exchange exchange + ----------------------------------------------- + key: AAPL --> trade ----> MSFT sell + \---> AAPL --> buy + +When noWait is true, do not wait for the server to confirm the binding. If any +error occurs the channel will be closed. Add a listener to NotifyClose to +handle these errors. 
+ +Optional arguments specific to the exchanges bound can also be specified. +*/ +func (me *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &exchangeBind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeBindOk{}, + ) +} + +/* +ExchangeUnbind unbinds the destination exchange from the source exchange on the +server by removing the routing key between them. This is the inverse of +ExchangeBind. If the binding does not currently exist, an error will be +returned. + +When noWait is true, do not wait for the server to confirm the deletion of the +binding. If any error occurs the channel will be closed. Add a listener to +NotifyClose to handle these errors. + +Optional arguments that are specific to the type of exchanges bound can also be +provided. These must match the same arguments specified in ExchangeBind to +identify the binding. +*/ +func (me *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return me.call( + &exchangeUnbind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeUnbindOk{}, + ) +} + +/* +Publish sends a Publishing from the client to an exchange on the server. + +When you want a single message to be delivered to a single queue, you can +publish to the default exchange with the routingKey of the queue name. This is +because every declared queue gets an implicit route to the default exchange. + +Since publishings are asynchronous, any undeliverable message will get returned +by the server. Add a listener with Channel.NotifyReturn to handle any +undeliverable message when calling publish with either the mandatory or +immediate parameters as true. 
+ +Publishings can be undeliverable when the mandatory flag is true and no queue is +bound that matches the routing key, or when the immediate flag is true and no +consumer on the matched queue is ready to accept the delivery. + +This can return an error when the channel, connection or socket is closed. The +error or lack of an error does not indicate whether the server has received this +publishing. + +It is possible for publishing to not reach the broker if the underlying socket +is shutdown without pending publishing packets being flushed from the kernel +buffers. The easy way of making it probable that all publishings reach the +server is to always call Connection.Close before terminating your publishing +application. The way to ensure that all publishings reach the server is to add +a listener to Channel.NotifyPublish and put the channel in confirm mode with +Channel.Confirm. Publishing delivery tags and their corresponding +confirmations start at 1. Exit when all publishings are confirmed. + +When Publish does not return an error and the channel is in confirm mode, the +internal counter for DeliveryTags with the first confirmation starting at 1. 
+ +*/ +func (me *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error { + if err := msg.Headers.Validate(); err != nil { + return err + } + + me.m.Lock() + defer me.m.Unlock() + + if err := me.send(me, &basicPublish{ + Exchange: exchange, + RoutingKey: key, + Mandatory: mandatory, + Immediate: immediate, + Body: msg.Body, + Properties: properties{ + Headers: msg.Headers, + ContentType: msg.ContentType, + ContentEncoding: msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + }, + }); err != nil { + return err + } + + if me.confirming { + me.confirms.Publish() + } + + return nil +} + +/* +Get synchronously receives a single Delivery from the head of a queue from the +server to the client. In almost all cases, using Channel.Consume will be +preferred. + +If there was a delivery waiting on the queue and that delivery was received the +second return value will be true. If there was no delivery waiting or an error +occured, the ok bool will be false. + +All deliveries must be acknowledged including those from Channel.Get. Call +Delivery.Ack on the returned delivery when you have fully processed this +delivery. + +When autoAck is true, the server will automatically acknowledge this message so +you don't have to. But if you are unable to fully process this message before +the channel or connection is closed, the message will not get requeued. 
+ +*/ +func (me *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { + req := &basicGet{Queue: queue, NoAck: autoAck} + res := &basicGetOk{} + empty := &basicGetEmpty{} + + if err := me.call(req, res, empty); err != nil { + return Delivery{}, false, err + } + + if res.DeliveryTag > 0 { + return *(newDelivery(me, res)), true, nil + } + + return Delivery{}, false, nil +} + +/* +Tx puts the channel into transaction mode on the server. All publishings and +acknowledgments following this method will be atomically committed or rolled +back for a single queue. Call either Channel.TxCommit or Channel.TxRollback to +leave this transaction and immediately start a new transaction. + +The atomicity across multiple queues is not defined as queue declarations and +bindings are not included in the transaction. + +The behavior of publishings that are delivered as mandatory or immediate while +the channel is in a transaction is not defined. + +Once a channel has been put into transaction mode, it cannot be taken out of +transaction mode. Use a different channel for non-transactional semantics. + +*/ +func (me *Channel) Tx() error { + return me.call( + &txSelect{}, + &txSelectOk{}, + ) +} + +/* +TxCommit atomically commits all publishings and acknowledgments for a single +queue and immediately starts a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (me *Channel) TxCommit() error { + return me.call( + &txCommit{}, + &txCommitOk{}, + ) +} + +/* +TxRollback atomically rolls back all publishings and acknowledgments for a +single queue and immediately starts a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (me *Channel) TxRollback() error { + return me.call( + &txRollback{}, + &txRollbackOk{}, + ) +} + +/* +Flow pauses the delivery of messages to consumers on this channel.
Channels +are opened with flow control not active, to open a channel with paused +deliveries immediately call this method with true after calling +Connection.Channel. + +When active is true, this method asks the server to temporarily pause deliveries +until called again with active as false. + +Channel.Get methods will not be affected by flow control. + +This method is not intended to act as window control. Use Channel.Qos to limit +the number of unacknowledged messages or bytes in flight instead. + +The server may also send us flow methods to throttle our publishings. A well +behaving publishing client should add a listener with Channel.NotifyFlow and +pause its publishings when true is sent on that channel. + +Note: RabbitMQ prefers to use TCP push back to control flow for all channels on +a connection, so under high volume scenarios, it's wise to open separate +Connections for publishings and deliveries. + +*/ +func (me *Channel) Flow(active bool) error { + return me.call( + &channelFlow{Active: active}, + &channelFlowOk{}, + ) +} + +/* +Confirm puts this channel into confirm mode so that the client can ensure all +publishings have successfully been received by the server. After entering this +mode, the server will send a basic.ack or basic.nack message with the deliver +tag set to a 1 based incrementing index corresponding to every publishing +received after the this method returns. + +Add a listener to Channel.NotifyPublish to respond to the Confirmations. If +Channel.NotifyPublish is not called, the Confirmations will be silently +ignored. + +The order of acknowledgments is not bound to the order of deliveries. + +Ack and Nack confirmations will arrive at some point in the future. + +Unroutable mandatory or immediate messages are acknowledged immediately after +any Channel.NotifyReturn listeners have been notified. 
Other messages are +acknowledged when all queues that should have the message routed to them have +either received acknowledgment of delivery or have enqueued the message, +persisting the message if necessary. + +When noWait is true, the client will not wait for a response. A channel +exception could occur if the server does not support this method. + +*/ +func (me *Channel) Confirm(noWait bool) error { + me.m.Lock() + defer me.m.Unlock() + + if err := me.call( + &confirmSelect{Nowait: noWait}, + &confirmSelectOk{}, + ); err != nil { + return err + } + + me.confirming = true + + return nil +} + +/* +Recover redelivers all unacknowledged deliveries on this channel. + +When requeue is false, messages will be redelivered to the original consumer. + +When requeue is true, messages will be redelivered to any available consumer, +potentially including the original. + +If the deliveries cannot be recovered, an error will be returned and the channel +will be closed. + +Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead +*/ +func (me *Channel) Recover(requeue bool) error { + return me.call( + &basicRecover{Requeue: requeue}, + &basicRecoverOk{}, + ) +} + +/* +Ack acknowledges a delivery by its delivery tag when having been consumed with +Channel.Consume or Channel.Get. + +Ack acknowledges all messages received prior to the delivery tag when multiple +is true. + +See also Delivery.Ack +*/ +func (me *Channel) Ack(tag uint64, multiple bool) error { + return me.send(me, &basicAck{ + DeliveryTag: tag, + Multiple: multiple, + }) +} + +/* +Nack negatively acknowledges a delivery by its delivery tag. Prefer this +method to notify the server that you were not able to process this delivery and +it must be redelivered or dropped.
+ +See also Delivery.Nack +*/ +func (me *Channel) Nack(tag uint64, multiple bool, requeue bool) error { + return me.send(me, &basicNack{ + DeliveryTag: tag, + Multiple: multiple, + Requeue: requeue, + }) +} + +/* +Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack +over Reject when communicating with a RabbitMQ server because you can Nack +multiple messages, reducing the amount of protocol messages to exchange. + +See also Delivery.Reject +*/ +func (me *Channel) Reject(tag uint64, requeue bool) error { + return me.send(me, &basicReject{ + DeliveryTag: tag, + Requeue: requeue, + }) +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/client_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/client_test.go new file mode 100644 index 000000000..be816bc38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/client_test.go @@ -0,0 +1,559 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "io" + "reflect" + "testing" + "time" +) + +type server struct { + *testing.T + r reader // framer <- client + w writer // framer -> client + S io.ReadWriteCloser // Server IO + C io.ReadWriteCloser // Client IO + + // captured client frames + start connectionStartOk + tune connectionTuneOk +} + +func defaultConfig() Config { + return Config{SASL: []Authentication{&PlainAuth{"guest", "guest"}}, Vhost: "/"} +} + +func newSession(t *testing.T) (io.ReadWriteCloser, *server) { + rs, wc := io.Pipe() + rc, ws := io.Pipe() + + rws := &logIO{t, "server", pipe{rs, ws}} + rwc := &logIO{t, "client", pipe{rc, wc}} + + server := server{ + T: t, + r: reader{rws}, + w: writer{rws}, + S: rws, + C: rwc, + } + + return rwc, &server +} + +func (t *server) expectBytes(b []byte) { + in := make([]byte, len(b)) + if _, err := io.ReadFull(t.S, in); err != nil { + t.Fatalf("io error expecting bytes: %v", err) + } + + if bytes.Compare(b, in) != 0 { + t.Fatalf("failed bytes: expected: %s got: %s", string(b), string(in)) + } +} + +func (t *server) send(channel int, m message) { + defer time.AfterFunc(time.Second, func() { panic("send deadlock") }).Stop() + + if err := t.w.WriteFrame(&methodFrame{ + ChannelId: uint16(channel), + Method: m, + }); err != nil { + t.Fatalf("frame err, write: %s", err) + } +} + +// drops all but method frames expected on the given channel +func (t *server) recv(channel int, m message) message { + defer time.AfterFunc(time.Second, func() { panic("recv deadlock") }).Stop() + + var remaining int + var header *headerFrame + var body []byte + + for { + frame, err := t.r.ReadFrame() + if err != nil { + t.Fatalf("frame err, read: %s", err) + } + + if frame.channel() != uint16(channel) { + t.Fatalf("expected frame on channel %d, got channel %d", channel, frame.channel()) + } + + switch f := frame.(type) { + case *heartbeatFrame: + // drop + + case *headerFrame: + // 
start content state + header = f + remaining = int(header.Size) + if remaining == 0 { + m.(messageWithContent).setContent(header.Properties, nil) + return m + } + + case *bodyFrame: + // continue until terminated + body = append(body, f.Body...) + remaining -= len(f.Body) + if remaining <= 0 { + m.(messageWithContent).setContent(header.Properties, body) + return m + } + + case *methodFrame: + if reflect.TypeOf(m) == reflect.TypeOf(f.Method) { + wantv := reflect.ValueOf(m).Elem() + havev := reflect.ValueOf(f.Method).Elem() + wantv.Set(havev) + if _, ok := m.(messageWithContent); !ok { + return m + } + } else { + t.Fatalf("expected method type: %T, got: %T", m, f.Method) + } + + default: + t.Fatalf("unexpected frame: %+v", f) + } + } + + panic("unreachable") +} + +func (t *server) expectAMQP() { + t.expectBytes([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) +} + +func (t *server) connectionStart() { + t.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "PLAIN", + Locales: "en-us", + }) + + t.recv(0, &t.start) +} + +func (t *server) connectionTune() { + t.send(0, &connectionTune{ + ChannelMax: 11, + FrameMax: 20000, + Heartbeat: 10, + }) + + t.recv(0, &t.tune) +} + +func (t *server) connectionOpen() { + t.expectAMQP() + t.connectionStart() + t.connectionTune() + + t.recv(0, &connectionOpen{}) + t.send(0, &connectionOpenOk{}) +} + +func (t *server) connectionClose() { + t.recv(0, &connectionClose{}) + t.send(0, &connectionCloseOk{}) +} + +func (t *server) channelOpen(id int) { + t.recv(id, &channelOpen{}) + t.send(id, &channelOpenOk{}) +} + +func TestDefaultClientProperties(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := defaultProduct, srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if 
want, got := defaultVersion, srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } +} + +func TestCustomClientProperties(t *testing.T) { + rwc, srv := newSession(t) + + config := defaultConfig() + config.Properties = Table{ + "product": "foo", + "version": "1.0", + } + + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, config); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + if want, got := config.Properties["product"], srv.start.ClientProperties["product"]; want != got { + t.Errorf("expected product %s got: %s", want, got) + } + + if want, got := config.Properties["version"], srv.start.ClientProperties["version"]; want != got { + t.Errorf("expected version %s got: %s", want, got) + } +} + +func TestOpen(t *testing.T) { + rwc, srv := newSession(t) + go func() { + srv.connectionOpen() + rwc.Close() + }() + + if c, err := Open(rwc, defaultConfig()); err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } +} + +func TestChannelOpen(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } +} + +func TestOpenFailedSASLUnsupportedMechanisms(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.send(0, &connectionStart{ + VersionMajor: 0, + VersionMinor: 9, + Mechanisms: "KERBEROS NTLM", + Locales: "en-us", + }) + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrSASL { + t.Fatalf("expected ErrSASL got: %+v on %+v", err, c) + } +} + +func TestOpenFailedCredentials(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + // Now kill/timeout the connection 
indicating bad auth + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrCredentials { + t.Fatalf("expected ErrCredentials got: %+v on %+v", err, c) + } +} + +func TestOpenFailedVhost(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.expectAMQP() + srv.connectionStart() + srv.connectionTune() + srv.recv(0, &connectionOpen{}) + + // Now kill/timeout the connection on bad Vhost + rwc.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != ErrVhost { + t.Fatalf("expected ErrVhost got: %+v on %+v", err, c) + } +} + +func TestConfirmMultipleOrdersDeliveryTags(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // Single tag, plus multiple, should produce + // 2, 1, 3, 4 + srv.send(1, &basicAck{DeliveryTag: 2}) + srv.send(1, &basicAck{DeliveryTag: 1}) + srv.send(1, &basicAck{DeliveryTag: 4, Multiple: true}) + + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + srv.recv(1, &basicPublish{}) + + // And some more, but in reverse order, multiple then one + // 5, 6, 7, 8 + srv.send(1, &basicAck{DeliveryTag: 6, Multiple: true}) + srv.send(1, &basicAck{DeliveryTag: 8}) + srv.send(1, &basicAck{DeliveryTag: 7}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + confirm := ch.NotifyPublish(make(chan Confirmation)) + + ch.Confirm(false) + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 1")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 2")}) + ch.Publish("", "q", false, false, Publishing{Body: 
[]byte("pub 3")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 4")}) + }() + + // received out of order, consumed in order + for i, tag := range []uint64{1, 2, 3, 4} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + + go func() { + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 5")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 6")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 7")}) + ch.Publish("", "q", false, false, Publishing{Body: []byte("pub 8")}) + }() + + for i, tag := range []uint64{5, 6, 7, 8} { + if ack := <-confirm; tag != ack.DeliveryTag { + t.Fatalf("failed ack, expected ack#%d to be %d, got %d", i, tag, ack.DeliveryTag) + } + } + +} + +func TestNotifyClosesReusedPublisherConfirmChan(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(1, &confirmSelect{}) + srv.send(1, &confirmSelectOk{}) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + ackAndNack := make(chan uint64) + ch.NotifyConfirm(ackAndNack, ackAndNack) + + if err := ch.Confirm(false); err != nil { + t.Fatalf("expected to enter confirm mode: %v", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("could not close connection: %v (%s)", c, err) + } +} + +func TestNotifyClosesAllChansAfterConnectionClose(t *testing.T) { + rwc, srv := newSession(t) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + srv.recv(0, &connectionClose{}) + srv.send(0, &connectionCloseOk{}) + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + 
ch, err := c.Channel() + if err != nil { + t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + if err := c.Close(); err != nil { + t.Fatalf("could not close connection: %v (%s)", c, err) + } + + select { + case <-c.NotifyClose(make(chan *Error)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close NotifyClose chan after Connection.Close") + } + + select { + case <-ch.NotifyClose(make(chan *Error)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Connection.NotifyClose chan after Connection.Close") + } + + select { + case <-ch.NotifyFlow(make(chan bool)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NotifyFlow chan after Connection.Close") + } + + select { + case <-ch.NotifyCancel(make(chan string)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NofityCancel chan after Connection.Close") + } + + select { + case <-ch.NotifyReturn(make(chan Return)): + case <-time.After(time.Millisecond): + t.Errorf("expected to close Channel.NotifyReturn chan after Connection.Close") + } + + confirms := ch.NotifyPublish(make(chan Confirmation)) + + select { + case <-confirms: + case <-time.After(time.Millisecond): + t.Errorf("expected to close confirms on Channel.NotifyPublish chan after Connection.Close") + } +} + +// Should not panic when sending bodies split at differnet boundaries +func TestPublishBodySliceIssue74(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + const frameSize = 100 + const publishings = frameSize * 3 + + done := make(chan bool) + base := make([]byte, publishings) + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + + for i := 0; i < publishings; i++ { + srv.recv(1, &basicPublish{}) + } + + done <- true + }() + + cfg := defaultConfig() + cfg.FrameSize = frameSize + + c, err := Open(rwc, cfg) + if err != nil { + t.Fatalf("could not create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + 
t.Fatalf("could not open channel: %v (%s)", ch, err) + } + + for i := 0; i < publishings; i++ { + go ch.Publish("", "q", false, false, Publishing{Body: base[0:i]}) + } + + <-done +} + +func TestPublishAndShutdownDeadlockIssue84(t *testing.T) { + rwc, srv := newSession(t) + defer rwc.Close() + + go func() { + srv.connectionOpen() + srv.channelOpen(1) + srv.recv(1, &basicPublish{}) + // Mimic a broken io pipe so that Publish catches the error and goes into shutdown + srv.S.Close() + }() + + c, err := Open(rwc, defaultConfig()) + if err != nil { + t.Fatalf("couldn't create connection: %v (%s)", c, err) + } + + ch, err := c.Channel() + if err != nil { + t.Fatalf("couldn't open channel: %v (%s)", ch, err) + } + + defer time.AfterFunc(500*time.Millisecond, func() { panic("Publish deadlock") }).Stop() + for { + if err := ch.Publish("exchange", "q", false, false, Publishing{Body: []byte("test")}); err != nil { + t.Log("successfully caught disconnect error", err) + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/confirms.go b/Godeps/_workspace/src/github.com/streadway/amqp/confirms.go new file mode 100644 index 000000000..ebee9368b --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/confirms.go @@ -0,0 +1,93 @@ +package amqp + +import "sync" + +// confirms resequences and notifies one or multiple publisher confirmation listeners +type confirms struct { + m sync.Mutex + listeners []chan Confirmation + sequencer map[uint64]Confirmation + published uint64 + expecting uint64 +} + +// newConfirms allocates a confirms +func newConfirms() *confirms { + return &confirms{ + sequencer: map[uint64]Confirmation{}, + published: 0, + expecting: 1, + } +} + +func (c *confirms) Listen(l chan Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + c.listeners = append(c.listeners, l) +} + +// publish increments the publishing counter +func (c *confirms) Publish() uint64 { + c.m.Lock() + defer c.m.Unlock() + + c.published++ + return c.published +} 
+ +// confirm confirms one publishing, increments the expecting delivery tag, and +// removes bookkeeping for that delivery tag. +func (c *confirms) confirm(confirmation Confirmation) { + delete(c.sequencer, c.expecting) + c.expecting++ + for _, l := range c.listeners { + l <- confirmation + } +} + +// resequence confirms any out of order delivered confirmations +func (c *confirms) resequence() { + for c.expecting <= c.published { + sequenced, found := c.sequencer[c.expecting] + if !found { + return + } + c.confirm(sequenced) + } +} + +// one confirms one publishing and all following in the publishing sequence +func (c *confirms) One(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + if c.expecting == confirmed.DeliveryTag { + c.confirm(confirmed) + } else { + c.sequencer[confirmed.DeliveryTag] = confirmed + } + c.resequence() +} + +// multiple confirms all publishings up until the delivery tag +func (c *confirms) Multiple(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + for c.expecting <= confirmed.DeliveryTag { + c.confirm(Confirmation{c.expecting, confirmed.Ack}) + } +} + +// Close closes all listeners, discarding any out of sequence confirmations +func (c *confirms) Close() error { + c.m.Lock() + defer c.m.Unlock() + + for _, l := range c.listeners { + close(l) + } + c.listeners = nil + return nil +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/confirms_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/confirms_test.go new file mode 100644 index 000000000..7eb2acc06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/confirms_test.go @@ -0,0 +1,119 @@ +package amqp + +import ( + "testing" + "time" +) + +func TestConfirmOneResequences(t *testing.T) { + var ( + fixtures = []Confirmation{ + {1, true}, + {2, false}, + {3, true}, + } + c = newConfirms() + l = make(chan Confirmation, len(fixtures)) + ) + + c.Listen(l) + + for i, _ := range fixtures { + if want, got := uint64(i+1), c.Publish(); want 
!= got { + t.Fatalf("expected publish to return the 1 based delivery tag published, want: %d, got: %d", want, got) + } + } + + c.One(fixtures[1]) + c.One(fixtures[2]) + + select { + case confirm := <-l: + t.Fatalf("expected to wait in order to properly resequence results, got: %+v", confirm) + default: + } + + c.One(fixtures[0]) + + for i, fix := range fixtures { + if want, got := fix, <-l; want != got { + t.Fatalf("expected to return confirmations in sequence for %d, want: %+v, got: %+v", i, want, got) + } + } +} + +func TestConfirmMultipleResequences(t *testing.T) { + var ( + fixtures = []Confirmation{ + {1, true}, + {2, true}, + {3, true}, + {4, true}, + } + c = newConfirms() + l = make(chan Confirmation, len(fixtures)) + ) + c.Listen(l) + + for _, _ = range fixtures { + c.Publish() + } + + c.Multiple(fixtures[len(fixtures)-1]) + + for i, fix := range fixtures { + if want, got := fix, <-l; want != got { + t.Fatalf("expected to confirm multiple in sequence for %d, want: %+v, got: %+v", i, want, got) + } + } +} + +func BenchmarkSequentialBufferedConfirms(t *testing.B) { + var ( + c = newConfirms() + l = make(chan Confirmation, 10) + ) + + c.Listen(l) + + for i := 0; i < t.N; i++ { + if i > cap(l)-1 { + <-l + } + c.One(Confirmation{c.Publish(), true}) + } +} + +func TestConfirmsIsThreadSafe(t *testing.T) { + const count = 1000 + const timeout = 5 * time.Second + var ( + c = newConfirms() + l = make(chan Confirmation) + pub = make(chan Confirmation) + done = make(chan Confirmation) + late = time.After(timeout) + ) + + c.Listen(l) + + for i := 0; i < count; i++ { + go func() { pub <- Confirmation{c.Publish(), true} }() + } + + for i := 0; i < count; i++ { + go func() { c.One(<-pub) }() + } + + for i := 0; i < count; i++ { + go func() { done <- <-l }() + } + + for i := 0; i < count; i++ { + select { + case <-done: + case <-late: + t.Fatalf("expected all publish/confirms to finish after %s", timeout) + } + } +} diff --git 
a/Godeps/_workspace/src/github.com/streadway/amqp/connection.go b/Godeps/_workspace/src/github.com/streadway/amqp/connection.go new file mode 100644 index 000000000..ad4007978 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/connection.go @@ -0,0 +1,769 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "reflect" + "strconv" + "strings" + "sync" + "time" +) + +const ( + maxChannelMax = (2 << 15) - 1 + + defaultHeartbeat = 10 * time.Second + defaultConnectionTimeout = 30 * time.Second + defaultProduct = "https://github.com/streadway/amqp" + defaultVersion = "β" + defaultChannelMax = maxChannelMax +) + +// Config is used in DialConfig and Open to specify the desired tuning +// parameters used during a connection open handshake. The negotiated tuning +// will be stored in the returned connection's Config field. +type Config struct { + // The SASL mechanisms to try in the client request, and the successful + // mechanism used on the Connection object. + // If SASL is nil, PlainAuth from the URL is used. + SASL []Authentication + + // Vhost specifies the namespace of permissions, exchanges, queues and + // bindings on the server. Dial sets this to the path parsed from the URL. + Vhost string + + ChannelMax int // 0 max channels means 2^16 - 1 + FrameSize int // 0 max bytes means unlimited + Heartbeat time.Duration // less than 1s uses the server's interval + + // TLSClientConfig specifies the client configuration of the TLS connection + // when establishing a tls transport. + // If the URL uses an amqps scheme, then an empty tls.Config with the + // ServerName from the URL is used. + TLSClientConfig *tls.Config + + // Properties is table of properties that the client advertises to the server. 
+ // This is an optional setting - if the application does not set this, + // the underlying library will use a generic set of client properties. + Properties Table + + // Dial returns a net.Conn prepared for a TLS handshake with TSLClientConfig, + // then an AMQP connection handshake. + // If Dial is nil, net.DialTimeout with a 30s connection and 30s read + // deadline is used. + Dial func(network, addr string) (net.Conn, error) +} + +// Connection manages the serialization and deserialization of frames from IO +// and dispatches the frames to the appropriate channel. All RPC methods and +// asyncronous Publishing, Delivery, Ack, Nack and Return messages are +// multiplexed on this channel. There must always be active receivers for +// every asynchronous message on this connection. +type Connection struct { + destructor sync.Once // shutdown once + sendM sync.Mutex // conn writer mutex + m sync.Mutex // struct field mutex + + conn io.ReadWriteCloser + + rpc chan message + writer *writer + sends chan time.Time // timestamps of each frame sent + deadlines chan readDeadliner // heartbeater updates read deadlines + + allocator *allocator // id generator valid after openTune + channels map[uint16]*Channel + + noNotify bool // true when we will never notify again + closes []chan *Error + blocks []chan Blocking + + errors chan *Error + + Config Config // The negotiated Config after connection.open + + Major int // Server's major version + Minor int // Server's minor version + Properties Table // Server properties +} + +type readDeadliner interface { + SetReadDeadline(time.Time) error +} + +type localNetAddr interface { + LocalAddr() net.Addr +} + +// defaultDial establishes a connection when config.Dial is not provided +func defaultDial(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, defaultConnectionTimeout) + if err != nil { + return nil, err + } + + // Heartbeating hasn't started yet, don't stall forever on a dead server. 
+ if err := conn.SetReadDeadline(time.Now().Add(defaultConnectionTimeout)); err != nil { + return nil, err + } + + return conn, nil +} + +// Dial accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// Dial uses the zero value of tls.Config when it encounters an amqps:// +// scheme. It is equivalent to calling DialTLS(amqp, nil). +func Dial(url string) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + }) +} + +// DialTLS accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// DialTLS uses the provided tls.Config when encountering an amqps:// scheme. +func DialTLS(url string, amqps *tls.Config) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + TLSClientConfig: amqps, + }) +} + +// DialConfig accepts a string in the AMQP URI format and a configuration for +// the transport and connection setup, returning a new Connection. Defaults to +// a server heartbeat interval of 10 seconds and sets the initial read deadline +// to 30 seconds. 
+func DialConfig(url string, config Config) (*Connection, error) { + var err error + var conn net.Conn + + uri, err := ParseURI(url) + if err != nil { + return nil, err + } + + if config.SASL == nil { + config.SASL = []Authentication{uri.PlainAuth()} + } + + if config.Vhost == "" { + config.Vhost = uri.Vhost + } + + if uri.Scheme == "amqps" && config.TLSClientConfig == nil { + config.TLSClientConfig = new(tls.Config) + } + + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) + + dialer := config.Dial + if dialer == nil { + dialer = defaultDial + } + + conn, err = dialer("tcp", addr) + if err != nil { + return nil, err + } + + if config.TLSClientConfig != nil { + // Use the URI's host for hostname validation unless otherwise set. Make a + // copy so not to modify the caller's reference when the caller reuses a + // tls.Config for a different URL. + if config.TLSClientConfig.ServerName == "" { + c := *config.TLSClientConfig + c.ServerName = uri.Host + config.TLSClientConfig = &c + } + + client := tls.Client(conn, config.TLSClientConfig) + if err := client.Handshake(); err != nil { + conn.Close() + return nil, err + } + + conn = client + } + + return Open(conn, config) +} + +/* +Open accepts an already established connection, or other io.ReadWriteCloser as +a transport. Use this method if you have established a TLS connection or wish +to use your own custom transport. + +*/ +func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) { + me := &Connection{ + conn: conn, + writer: &writer{bufio.NewWriter(conn)}, + channels: make(map[uint16]*Channel), + rpc: make(chan message), + sends: make(chan time.Time), + errors: make(chan *Error, 1), + deadlines: make(chan readDeadliner, 1), + } + go me.reader(conn) + return me, me.open(config) +} + +/* +LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr) +as a fallback default value if the underlying transport does not support LocalAddr(). 
+*/ +func (me *Connection) LocalAddr() net.Addr { + if c, ok := me.conn.(localNetAddr); ok { + return c.LocalAddr() + } + return &net.TCPAddr{} +} + +/* +NotifyClose registers a listener for close events either initiated by an error +accompaning a connection.close method or by a normal shutdown. + +On normal shutdowns, the chan will be closed. + +To reconnect after a transport or protocol error, register a listener here and +re-run your setup process. + +*/ +func (me *Connection) NotifyClose(c chan *Error) chan *Error { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.closes = append(me.closes, c) + } + + return c +} + +/* +NotifyBlock registers a listener for RabbitMQ specific TCP flow control method +extensions connection.blocked and connection.unblocked. Flow control is active +with a reason when Blocking.Blocked is true. When a Connection is blocked, all +methods will block across all connections until server resources become free +again. + +This optional extension is supported by the server when the +"connection.blocked" server capability key is true. + +*/ +func (me *Connection) NotifyBlocked(c chan Blocking) chan Blocking { + me.m.Lock() + defer me.m.Unlock() + + if me.noNotify { + close(c) + } else { + me.blocks = append(me.blocks, c) + } + + return c +} + +/* +Close requests and waits for the response to close the AMQP connection. + +It's advisable to use this message when publishing to ensure all kernel buffers +have been flushed on the server and client before exiting. + +An error indicates that server may not have received this request to close but +the connection should be treated as closed regardless. + +After returning from this call, all resources associated with this connection, +including the underlying io, Channels, Notify listeners and Channel consumers +will also be closed. 
+*/ +func (me *Connection) Close() error { + defer me.shutdown(nil) + return me.call( + &connectionClose{ + ReplyCode: replySuccess, + ReplyText: "kthxbai", + }, + &connectionCloseOk{}, + ) +} + +func (me *Connection) closeWith(err *Error) error { + defer me.shutdown(err) + return me.call( + &connectionClose{ + ReplyCode: uint16(err.Code), + ReplyText: err.Reason, + }, + &connectionCloseOk{}, + ) +} + +func (me *Connection) send(f frame) error { + me.sendM.Lock() + err := me.writer.WriteFrame(f) + me.sendM.Unlock() + + if err != nil { + // shutdown could be re-entrant from signaling notify chans + go me.shutdown(&Error{ + Code: FrameError, + Reason: err.Error(), + }) + } else { + // Broadcast we sent a frame, reducing heartbeats, only + // if there is something that can receive - like a non-reentrant + // call or if the heartbeater isn't running + select { + case me.sends <- time.Now(): + default: + } + } + + return err +} + +func (me *Connection) shutdown(err *Error) { + me.destructor.Do(func() { + if err != nil { + for _, c := range me.closes { + c <- err + } + } + + for _, ch := range me.channels { + me.closeChannel(ch, err) + } + + if err != nil { + me.errors <- err + } + + me.conn.Close() + + for _, c := range me.closes { + close(c) + } + + for _, c := range me.blocks { + close(c) + } + + me.m.Lock() + me.noNotify = true + me.m.Unlock() + }) +} + +// All methods sent to the connection channel should be synchronous so we +// can handle them directly without a framing component +func (me *Connection) demux(f frame) { + if f.channel() == 0 { + me.dispatch0(f) + } else { + me.dispatchN(f) + } +} + +func (me *Connection) dispatch0(f frame) { + switch mf := f.(type) { + case *methodFrame: + switch m := mf.Method.(type) { + case *connectionClose: + // Send immediately as shutdown will close our side of the writer. 
+ me.send(&methodFrame{ + ChannelId: 0, + Method: &connectionCloseOk{}, + }) + + me.shutdown(newError(m.ReplyCode, m.ReplyText)) + case *connectionBlocked: + for _, c := range me.blocks { + c <- Blocking{Active: true, Reason: m.Reason} + } + case *connectionUnblocked: + for _, c := range me.blocks { + c <- Blocking{Active: false} + } + default: + me.rpc <- m + } + case *heartbeatFrame: + // kthx - all reads reset our deadline. so we can drop this + default: + // lolwat - channel0 only responds to methods and heartbeats + me.closeWith(ErrUnexpectedFrame) + } +} + +func (me *Connection) dispatchN(f frame) { + me.m.Lock() + channel := me.channels[f.channel()] + me.m.Unlock() + + if channel != nil { + channel.recv(channel, f) + } else { + me.dispatchClosed(f) + } +} + +// section 2.3.7: "When a peer decides to close a channel or connection, it +// sends a Close method. The receiving peer MUST respond to a Close with a +// Close-Ok, and then both parties can close their channel or connection. Note +// that if peers ignore Close, deadlock can happen when both peers send Close +// at the same time." +// +// When we don't have a channel, so we must respond with close-ok on a close +// method. This can happen between a channel exception on an asynchronous +// method like basic.publish and a synchronous close with channel.close. +// In that case, we'll get both a channel.close and channel.close-ok in any +// order. 
+func (me *Connection) dispatchClosed(f frame) { + // Only consider method frames, drop content/header frames + if mf, ok := f.(*methodFrame); ok { + switch mf.Method.(type) { + case *channelClose: + me.send(&methodFrame{ + ChannelId: f.channel(), + Method: &channelCloseOk{}, + }) + case *channelCloseOk: + // we are already closed, so do nothing + default: + // unexpected method on closed channel + me.closeWith(ErrClosed) + } + } +} + +// Reads each frame off the IO and hand off to the connection object that +// will demux the streams and dispatch to one of the opened channels or +// handle on channel 0 (the connection channel). +func (me *Connection) reader(r io.Reader) { + buf := bufio.NewReader(r) + frames := &reader{buf} + conn, haveDeadliner := r.(readDeadliner) + + for { + frame, err := frames.ReadFrame() + + if err != nil { + me.shutdown(&Error{Code: FrameError, Reason: err.Error()}) + return + } + + me.demux(frame) + + if haveDeadliner { + me.deadlines <- conn + } + } +} + +// Ensures that at least one frame is being sent at the tuned interval with a +// jitter tolerance of 1s +func (me *Connection) heartbeater(interval time.Duration, done chan *Error) { + const maxServerHeartbeatsInFlight = 3 + + var sendTicks <-chan time.Time + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + sendTicks = ticker.C + } + + lastSent := time.Now() + + for { + select { + case at, stillSending := <-me.sends: + // When actively sending, depend on sent frames to reset server timer + if stillSending { + lastSent = at + } else { + return + } + + case at := <-sendTicks: + // When idle, fill the space with a heartbeat frame + if at.Sub(lastSent) > interval-time.Second { + if err := me.send(&heartbeatFrame{}); err != nil { + // send heartbeats even after close/closeOk so we + // tick until the connection starts erroring + return + } + } + + case conn := <-me.deadlines: + // When reading, reset our side of the deadline, if we've negotiated one with + // a 
deadline that covers at least 2 server heartbeats + if interval > 0 { + conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)) + } + + case <-done: + return + } + } +} + +// Convenience method to inspect the Connection.Properties["capabilities"] +// Table for server identified capabilities like "basic.ack" or +// "confirm.select". +func (me *Connection) isCapable(featureName string) bool { + capabilities, _ := me.Properties["capabilities"].(Table) + hasFeature, _ := capabilities[featureName].(bool) + return hasFeature +} + +// allocateChannel records but does not open a new channel with a unique id. +// This method is the initial part of the channel lifecycle and paired with +// releaseChannel +func (me *Connection) allocateChannel() (*Channel, error) { + me.m.Lock() + defer me.m.Unlock() + + id, ok := me.allocator.next() + if !ok { + return nil, ErrChannelMax + } + + ch := newChannel(me, uint16(id)) + me.channels[uint16(id)] = ch + + return ch, nil +} + +// releaseChannel removes a channel from the registry as the final part of the +// channel lifecycle +func (me *Connection) releaseChannel(id uint16) { + me.m.Lock() + defer me.m.Unlock() + + delete(me.channels, id) + me.allocator.release(int(id)) +} + +// openChannel allocates and opens a channel, must be paired with closeChannel +func (me *Connection) openChannel() (*Channel, error) { + ch, err := me.allocateChannel() + if err != nil { + return nil, err + } + + if err := ch.open(); err != nil { + return nil, err + } + return ch, nil +} + +// closeChannel releases and initiates a shutdown of the channel. All channel +// closures should be initiated here for proper channel lifecycle management on +// this connection. +func (me *Connection) closeChannel(ch *Channel, e *Error) { + ch.shutdown(e) + me.releaseChannel(ch.id) +} + +/* +Channel opens a unique, concurrent server channel to process the bulk of AMQP +messages. 
Any error from methods on this receiver will render the receiver +invalid and a new Channel should be opened. + +*/ +func (me *Connection) Channel() (*Channel, error) { + return me.openChannel() +} + +func (me *Connection) call(req message, res ...message) error { + // Special case for when the protocol header frame is sent insted of a + // request method + if req != nil { + if err := me.send(&methodFrame{ChannelId: 0, Method: req}); err != nil { + return err + } + } + + select { + case err := <-me.errors: + return err + + case msg := <-me.rpc: + // Try to match one of the result types + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + + panic("unreachable") +} + +// Connection = open-Connection *use-Connection close-Connection +// open-Connection = C:protocol-header +// S:START C:START-OK +// *challenge +// S:TUNE C:TUNE-OK +// C:OPEN S:OPEN-OK +// challenge = S:SECURE C:SECURE-OK +// use-Connection = *channel +// close-Connection = C:CLOSE S:CLOSE-OK +// / S:CLOSE C:CLOSE-OK +func (me *Connection) open(config Config) error { + if err := me.send(&protocolHeader{}); err != nil { + return err + } + + return me.openStart(config) +} + +func (me *Connection) openStart(config Config) error { + start := &connectionStart{} + + if err := me.call(nil, start); err != nil { + return err + } + + me.Major = int(start.VersionMajor) + me.Minor = int(start.VersionMinor) + me.Properties = Table(start.ServerProperties) + + // eventually support challenge/response here by also responding to + // connectionSecure. 
+ auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " ")) + if !ok { + return ErrSASL + } + + // Save this mechanism off as the one we chose + me.Config.SASL = []Authentication{auth} + + return me.openTune(config, auth) +} + +func (me *Connection) openTune(config Config, auth Authentication) error { + if len(config.Properties) == 0 { + config.Properties = Table{ + "product": defaultProduct, + "version": defaultVersion, + } + } + + config.Properties["capabilities"] = Table{ + "connection.blocked": true, + "consumer_cancel_notify": true, + } + + ok := &connectionStartOk{ + Mechanism: auth.Mechanism(), + Response: auth.Response(), + ClientProperties: config.Properties, + } + tune := &connectionTune{} + + if err := me.call(ok, tune); err != nil { + // per spec, a connection can only be closed when it has been opened + // so at this point, we know it's an auth error, but the socket + // was closed instead. Return a meaningful error. + return ErrCredentials + } + + // When the server and client both use default 0, then the max channel is + // only limited by uint16. + me.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + if me.Config.ChannelMax == 0 { + me.Config.ChannelMax = defaultChannelMax + } + me.Config.ChannelMax = min(me.Config.ChannelMax, maxChannelMax) + + // Frame size includes headers and end byte (len(payload)+8), even if + // this is less than FrameMinSize, use what the server sends because the + // alternative is to stop the handshake here. 
+ me.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax)) + + // Save this off for resetDeadline() + me.Config.Heartbeat = time.Second * time.Duration(pick( + int(config.Heartbeat/time.Second), + int(tune.Heartbeat))) + + // "The client should start sending heartbeats after receiving a + // Connection.Tune method" + go me.heartbeater(me.Config.Heartbeat, me.NotifyClose(make(chan *Error, 1))) + + if err := me.send(&methodFrame{ + ChannelId: 0, + Method: &connectionTuneOk{ + ChannelMax: uint16(me.Config.ChannelMax), + FrameMax: uint32(me.Config.FrameSize), + Heartbeat: uint16(me.Config.Heartbeat / time.Second), + }, + }); err != nil { + return err + } + + return me.openVhost(config) +} + +func (me *Connection) openVhost(config Config) error { + req := &connectionOpen{VirtualHost: config.Vhost} + res := &connectionOpenOk{} + + if err := me.call(req, res); err != nil { + // Cannot be closed yet, but we know it's a vhost problem + return ErrVhost + } + + me.Config.Vhost = config.Vhost + + return me.openComplete() +} + +// openComplete performs any final Connection initialization dependent on the +// connection handshake. +func (me *Connection) openComplete() error { + me.allocator = newAllocator(1, me.Config.ChannelMax) + return nil +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func pick(client, server int) int { + if client == 0 || server == 0 { + return max(client, server) + } + return min(client, server) +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/consumers.go b/Godeps/_workspace/src/github.com/streadway/amqp/consumers.go new file mode 100644 index 000000000..b6bd60575 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/consumers.go @@ -0,0 +1,118 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "os" + "sync" + "sync/atomic" +) + +var consumerSeq uint64 + +func uniqueConsumerTag() string { + return fmt.Sprintf("ctag-%s-%d", os.Args[0], atomic.AddUint64(&consumerSeq, 1)) +} + +type consumerBuffers map[string]chan *Delivery + +// Concurrent type that manages the consumerTag -> +// ingress consumerBuffer mapping +type consumers struct { + sync.Mutex + chans consumerBuffers +} + +func makeConsumers() *consumers { + return &consumers{chans: make(consumerBuffers)} +} + +func bufferDeliveries(in chan *Delivery, out chan Delivery) { + var queue []*Delivery + var queueIn = in + + for delivery := range in { + select { + case out <- *delivery: + // delivered immediately while the consumer chan can receive + default: + queue = append(queue, delivery) + } + + for len(queue) > 0 { + select { + case out <- *queue[0]: + queue = queue[1:] + case delivery, open := <-queueIn: + if open { + queue = append(queue, delivery) + } else { + // stop receiving to drain the queue + queueIn = nil + } + } + } + } + + close(out) +} + +// On key conflict, close the previous channel. +func (me *consumers) add(tag string, consumer chan Delivery) { + me.Lock() + defer me.Unlock() + + if prev, found := me.chans[tag]; found { + close(prev) + } + + in := make(chan *Delivery) + go bufferDeliveries(in, consumer) + + me.chans[tag] = in +} + +func (me *consumers) close(tag string) (found bool) { + me.Lock() + defer me.Unlock() + + ch, found := me.chans[tag] + + if found { + delete(me.chans, tag) + close(ch) + } + + return found +} + +func (me *consumers) closeAll() { + me.Lock() + defer me.Unlock() + + for _, ch := range me.chans { + close(ch) + } + + me.chans = make(consumerBuffers) +} + +// Sends a delivery to a the consumer identified by `tag`. 
+// If unbuffered channels are used for Consume this method +// could block all deliveries until the consumer +// receives on the other end of the channel. +func (me *consumers) send(tag string, msg *Delivery) bool { + me.Lock() + defer me.Unlock() + + buffer, found := me.chans[tag] + if found { + buffer <- msg + } + + return found +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/delivery.go b/Godeps/_workspace/src/github.com/streadway/amqp/delivery.go new file mode 100644 index 000000000..f84ae4592 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/delivery.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "time" +) + +var errDeliveryNotInitialized = errors.New("delivery not initialized") + +// Acknowledger notifies the server of successful or failed consumption of +// delivieries via identifier found in the Delivery.DeliveryTag field. +// +// Applications can provide mock implementations in tests of Delivery handlers. +type Acknowledger interface { + Ack(tag uint64, multiple bool) error + Nack(tag uint64, multiple bool, requeue bool) error + Reject(tag uint64, requeue bool) error +} + +// Delivery captures the fields for a previously delivered message resident in +// a queue to be delivered by the server to a consumer from Channel.Consume or +// Channel.Get. 
+type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implemention use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + Redelivered bool + Exchange string // basic.publish exhange + RoutingKey string // basic.publish routing key + + Body []byte +} + +func newDelivery(channel *Channel, msg messageWithContent) *Delivery { + props, body := msg.getContent() + + delivery := Delivery{ + Acknowledger: channel, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } + + // Properties for the delivery types + switch m := msg.(type) { + case *basicDeliver: + delivery.ConsumerTag = m.ConsumerTag + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = 
m.Exchange + delivery.RoutingKey = m.RoutingKey + + case *basicGetOk: + delivery.MessageCount = m.MessageCount + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + } + + return &delivery +} + +/* +Ack delegates an acknowledgement through the Acknowledger interface that the +client or server has finished work on a delivery. + +All deliveries in AMQP must be acknowledged. If you called Channel.Consume +with autoAck true then the server will be automatically ack each message and +this method should not be called. Otherwise, you must call Delivery.Ack after +you have successfully processed this delivery. + +When multiple is true, this delivery and all prior unacknowledged deliveries +on the same channel will be acknowledged. This is useful for batch processing +of deliveries. + +An error will indicate that the acknowledge could not be delivered to the +channel it was sent from. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (me Delivery) Ack(multiple bool) error { + if me.Acknowledger == nil { + return errDeliveryNotInitialized + } + return me.Acknowledger.Ack(me.DeliveryTag, multiple) +} + +/* +Reject delegates a negatively acknowledgement through the Acknowledger interface. + +When requeue is true, queue this message to be delivered to a consumer on a +different channel. When requeue is false or the server is unable to queue this +message, it will be dropped. + +If you are batch processing deliveries, and your server supports it, prefer +Delivery.Nack. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. 
+*/
+func (me Delivery) Reject(requeue bool) error {
+	if me.Acknowledger == nil {
+		return errDeliveryNotInitialized
+	}
+	return me.Acknowledger.Reject(me.DeliveryTag, requeue)
+}
+
+/*
+Nack negatively acknowledges the delivery of message(s) identified by the
+delivery tag from either the client or server.
+
+When multiple is true, nack messages up to and including delivered messages up
+until the delivery tag delivered on the same channel.
+
+When requeue is true, request the server to deliver this message to a different
+consumer. If it is not possible or requeue is false, the message will be
+dropped or delivered to a server configured dead-letter queue.
+
+This method must not be used to select or requeue messages the client wishes
+not to handle, rather it is to inform the server that the client is incapable
+of handling this message at this time.
+
+Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every
+delivery that is not automatically acknowledged.
+*/
+func (me Delivery) Nack(multiple, requeue bool) error {
+	if me.Acknowledger == nil {
+		return errDeliveryNotInitialized
+	}
+	return me.Acknowledger.Nack(me.DeliveryTag, multiple, requeue)
+}
diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/delivery_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/delivery_test.go
new file mode 100644
index 000000000..f126f87d8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/streadway/amqp/delivery_test.go
@@ -0,0 +1,33 @@
+package amqp
+
+import "testing"
+
+func shouldNotPanic(t *testing.T) {
+	if err := recover(); err != nil {
+		t.Fatalf("should not panic, got: %s", err)
+	}
+}
+
+// A closed delivery chan could produce zero value. Ack/Nack/Reject on these
+// deliveries can produce a nil pointer panic. Instead return an error when
+// the method can never be successful.
+func TestAckZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Ack(false); err == nil {
+		t.Errorf("expected Delivery{}.Ack to error")
+	}
+}
+
+func TestNackZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Nack(false, false); err == nil {
+		t.Errorf("expected Delivery{}.Nack to error")
+	}
+}
+
+func TestRejectZeroValueAcknowledgerDoesNotPanic(t *testing.T) {
+	defer shouldNotPanic(t)
+	if err := (Delivery{}).Reject(false); err == nil {
+		t.Errorf("expected Delivery{}.Reject to error")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/doc.go b/Godeps/_workspace/src/github.com/streadway/amqp/doc.go
new file mode 100644
index 000000000..94c29f825
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/streadway/amqp/doc.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Source code and contact info at http://github.com/streadway/amqp
+
+/*
+AMQP 0.9.1 client with RabbitMQ extensions
+
+Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much
+of the terminology in this library directly relates to AMQP concepts.
+
+  Resources
+
+  http://www.rabbitmq.com/tutorials/amqp-concepts.html
+  http://www.rabbitmq.com/getstarted.html
+  http://www.rabbitmq.com/amqp-0-9-1-reference.html
+
+Design
+
+Most other broker clients publish to queues, but in AMQP, clients publish
+Exchanges instead. AMQP is programmable, meaning that both the producers and
+consumers agree on the configuration of the broker, instead requiring an
+operator or system configuration that declares the logical topology in the
+broker. The routing between producers and consumer queues is via Bindings.
+These bindings form the logical topology of the broker.
+ +In this library, a message sent from publisher is called a "Publishing" and a +message received to a consumer is called a "Delivery". The fields of +Publishings and Deliveries are close but not exact mappings to the underlying +wire format to maintain stronger types. Many other libraries will combine +message properties with message headers. In this library, the message well +known properties are strongly typed fields on the Publishings and Deliveries, +whereas the user defined headers are in the Headers field. + +The method naming closely matches the protocol's method name with positional +parameters mapping to named protocol message fields. The motivation here is to +present a comprehensive view over all possible interactions with the server. + +Generally, methods that map to protocol methods of the "basic" class will be +elided in this interface, and "select" methods of various channel mode selectors +will be elided for example Channel.Confirm and Channel.Tx. + +The library is intentionally designed to be synchronous, where responses for +each protocol message are required to be received in an RPC manner. Some +methods have a noWait parameter like Channel.QueueDeclare, and some methods are +asynchronous like Channel.Publish. The error values should still be checked for +these methods as they will indicate IO failures like when the underlying +connection closes. + +Asynchronous Events + +Clients of this library may be interested in receiving some of the protocol +messages other than Deliveries like basic.ack methods while a channel is in +confirm mode. + +The Notify* methods with Connection and Channel receivers model the pattern of +asynchronous events like closes due to exceptions, or messages that are sent out +of band from an RPC call like basic.ack or basic.flow. + +Any asynchronous events, including Deliveries and Publishings must always have +a receiver until the corresponding chans are closed. 
Without asynchronous
+receivers, the synchronous methods will block.
+
+Use Case
+
+It's important as a client to an AMQP topology to ensure the state of the
+broker matches your expectations. For both publish and consume use cases,
+make sure you declare the queues, exchanges and bindings you expect to exist
+prior to calling Channel.Publish or Channel.Consume.
+
+  // Connections start with amqp.Dial() typically from a command line argument
+  // or environment variable.
+  connection, err := amqp.Dial(os.Getenv("AMQP_URL"))
+
+  // To cleanly shutdown by flushing kernel buffers, make sure to close and
+  // wait for the response.
+  defer connection.Close()
+
+  // Most operations happen on a channel. If any error is returned on a
+  // channel, the channel will no longer be valid, throw it away and try with
+  // a different channel. If you use many channels, it's useful for the
+  // server to
+  channel, err := connection.Channel()
+
+  // Declare your topology here, if it doesn't exist, it will be created, if
+  // it existed already and is not what you expect, then that's considered an
+  // error.
+
+  // Use your connection on this topology with either Publish or Consume, or
+  // inspect your queues with QueueInspect. It's unwise to mix Publish and
+  // Consume to let TCP do its job well.
+
+SSL/TLS - Secure connections
+
+When Dial encounters an amqps:// scheme, it will use the zero value of a
+tls.Config. This will only perform server certificate and host verification.
+
+Use DialTLS when you wish to provide a client certificate (recommended),
+include a private certificate authority's certificate in the cert chain for
+server validity, or run insecure by not verifying the server certificate dial
+your own connection. DialTLS will use the provided tls.Config when it
+encounters an amqps:// scheme and will dial a plain connection when it
+encounters an amqp:// scheme.
+ +SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html + +*/ +package amqp diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/examples_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/examples_test.go new file mode 100644 index 000000000..8be53f427 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/examples_test.go @@ -0,0 +1,393 @@ +package amqp_test + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "net" + "runtime" + "time" + + "github.com/streadway/amqp" +) + +func ExampleConfig_timeout() { + // Provide your own anonymous Dial function that delgates to net.DialTimout + // for custom timeouts + + conn, err := amqp.DialConfig("amqp:///", amqp.Config{ + Dial: func(network, addr string) (net.Conn, error) { + return net.DialTimeout(network, addr, 2*time.Second) + }, + }) + + log.Printf("conn: %v, err: %v", conn, err) +} + +func ExampleDialTLS() { + // To get started with SSL/TLS follow the instructions for adding SSL/TLS + // support in RabbitMQ with a private certificate authority here: + // + // http://www.rabbitmq.com/ssl.html + // + // Then in your rabbitmq.config, disable the plain AMQP port, verify clients + // and fail if no certificate is presented with the following: + // + // [ + // {rabbit, [ + // {tcp_listeners, []}, % listens on 127.0.0.1:5672 + // {ssl_listeners, [5671]}, % listens on 0.0.0.0:5671 + // {ssl_options, [{cacertfile,"/path/to/your/testca/cacert.pem"}, + // {certfile,"/path/to/your/server/cert.pem"}, + // {keyfile,"/path/to/your/server/key.pem"}, + // {verify,verify_peer}, + // {fail_if_no_peer_cert,true}]} + // ]} + // ]. + + cfg := new(tls.Config) + + // The self-signing certificate authority's certificate must be included in + // the RootCAs to be trusted so that the server certificate can be verified. + // + // Alternatively to adding it to the tls.Config you can add the CA's cert to + // your system's root CAs. 
The tls package will use the system roots + // specific to each support OS. Under OS X, add (drag/drop) your cacert.pem + // file to the 'Certificates' section of KeyChain.app to add and always + // trust. + // + // Or with the command line add and trust the DER encoded certificate: + // + // security add-certificate testca/cacert.cer + // security add-trusted-cert testca/cacert.cer + // + // If you depend on the system root CAs, then use nil for the RootCAs field + // so the system roots will be loaded. + + cfg.RootCAs = x509.NewCertPool() + + if ca, err := ioutil.ReadFile("testca/cacert.pem"); err == nil { + cfg.RootCAs.AppendCertsFromPEM(ca) + } + + // Move the client cert and key to a location specific to your application + // and load them here. + + if cert, err := tls.LoadX509KeyPair("client/cert.pem", "client/key.pem"); err == nil { + cfg.Certificates = append(cfg.Certificates, cert) + } + + // Server names are validated by the crypto/tls package, so the server + // certificate must be made for the hostname in the URL. Find the commonName + // (CN) and make sure the hostname in the URL matches this common name. Per + // the RabbitMQ instructions for a self-signed cert, this defautls to the + // current hostname. + // + // openssl x509 -noout -in server/cert.pem -subject + // + // If your server name in your certificate is different than the host you are + // connecting to, set the hostname used for verification in + // ServerName field of the tls.Config struct. + + conn, err := amqp.DialTLS("amqps://server-name-from-certificate/", cfg) + + log.Printf("conn: %v, err: %v", conn, err) +} + +func ExampleChannel_Confirm_bridge() { + // This example acts as a bridge, shoveling all messages sent from the source + // exchange "log" to destination exchange "log". + + // Confirming publishes can help from overproduction and ensure every message + // is delivered. 
+ + // Setup the source of the store and forward + source, err := amqp.Dial("amqp://source/") + if err != nil { + log.Fatalf("connection.open source: %s", err) + } + defer source.Close() + + chs, err := source.Channel() + if err != nil { + log.Fatalf("channel.open source: %s", err) + } + + if err := chs.ExchangeDeclare("log", "topic", true, false, false, false, nil); err != nil { + log.Fatalf("exchange.declare destination: %s", err) + } + + if _, err := chs.QueueDeclare("remote-tee", true, true, false, false, nil); err != nil { + log.Fatalf("queue.declare source: %s", err) + } + + if err := chs.QueueBind("remote-tee", "#", "logs", false, nil); err != nil { + log.Fatalf("queue.bind source: %s", err) + } + + shovel, err := chs.Consume("remote-tee", "shovel", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume source: %s", err) + } + + // Setup the destination of the store and forward + destination, err := amqp.Dial("amqp://destination/") + if err != nil { + log.Fatalf("connection.open destination: %s", err) + } + defer destination.Close() + + chd, err := destination.Channel() + if err != nil { + log.Fatalf("channel.open destination: %s", err) + } + + if err := chd.ExchangeDeclare("log", "topic", true, false, false, false, nil); err != nil { + log.Fatalf("exchange.declare destination: %s", err) + } + + // Buffer of 1 for our single outstanding publishing + confirms := chd.NotifyPublish(make(chan amqp.Confirmation, 1)) + + if err := chd.Confirm(false); err != nil { + log.Fatalf("confirm.select destination: %s", err) + } + + // Now pump the messages, one by one, a smarter implementation + // would batch the deliveries and use multiple ack/nacks + for { + msg, ok := <-shovel + if !ok { + log.Fatalf("source channel closed, see the reconnect example for handling this") + } + + err = chd.Publish("logs", msg.RoutingKey, false, false, amqp.Publishing{ + // Copy all the properties + ContentType: msg.ContentType, + ContentEncoding: 
msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + + // Custom headers + Headers: msg.Headers, + + // And the body + Body: msg.Body, + }) + + if err != nil { + msg.Nack(false, false) + log.Fatalf("basic.publish destination: %s", msg) + } + + // only ack the source delivery when the destination acks the publishing + if confirmed := <-confirms; confirmed.Ack { + msg.Ack(false) + } else { + msg.Nack(false, false) + } + } +} + +func ExampleChannel_Consume() { + // Connects opens an AMQP connection from the credentials in the URL. + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + defer conn.Close() + + c, err := conn.Channel() + if err != nil { + log.Fatalf("channel.open: %s", err) + } + + // We declare our topology on both the publisher and consumer to ensure they + // are the same. This is part of AMQP being a programmable messaging model. + // + // See the Channel.Publish example for the complimentary declare. + err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil) + if err != nil { + log.Fatalf("exchange.declare: %s", err) + } + + // Establish our queue topologies that we are responsible for + type bind struct { + queue string + key string + } + + bindings := []bind{ + bind{"page", "alert"}, + bind{"email", "info"}, + bind{"firehose", "#"}, + } + + for _, b := range bindings { + _, err = c.QueueDeclare(b.queue, true, false, false, false, nil) + if err != nil { + log.Fatalf("queue.declare: %v", err) + } + + err = c.QueueBind(b.queue, b.key, "logs", false, nil) + if err != nil { + log.Fatalf("queue.bind: %v", err) + } + } + + // Set our quality of service. 
Since we're sharing 3 consumers on the same + // channel, we want at least 3 messages in flight. + err = c.Qos(3, 0, false) + if err != nil { + log.Fatalf("basic.qos: %v", err) + } + + // Establish our consumers that have different responsibilities. Our first + // two queues do not ack the messages on the server, so require to be acked + // on the client. + + pages, err := c.Consume("page", "pager", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + go func() { + for log := range pages { + // ... this consumer is responsible for sending pages per log + log.Ack(false) + } + }() + + // Notice how the concern for which messages arrive here are in the AMQP + // topology and not in the queue. We let the server pick a consumer tag this + // time. + + emails, err := c.Consume("email", "", false, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + go func() { + for log := range emails { + // ... this consumer is responsible for sending emails per log + log.Ack(false) + } + }() + + // This consumer requests that every message is acknowledged as soon as it's + // delivered. + + firehose, err := c.Consume("firehose", "", true, false, false, false, nil) + if err != nil { + log.Fatalf("basic.consume: %v", err) + } + + // To show how to process the items in parallel, we'll use a work pool. + for i := 0; i < runtime.NumCPU(); i++ { + go func(work <-chan amqp.Delivery) { + for _ = range work { + // ... this consumer pulls from the firehose and doesn't need to acknowledge + } + }(firehose) + } + + // Wait until you're ready to finish, could be a signal handler here. + time.Sleep(10 * time.Second) + + // Cancelling a consumer by name will finish the range and gracefully end the + // goroutine + err = c.Cancel("pager", false) + if err != nil { + log.Fatalf("basic.cancel: %v", err) + } + + // deferred closing the Connection will also finish the consumer's ranges of + // their delivery chans. 
If you need every delivery to be processed, make + // sure to wait for all consumers goroutines to finish before exiting your + // process. +} + +func ExampleChannel_Publish() { + // Connects opens an AMQP connection from the credentials in the URL. + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + + // This waits for a server acknowledgment which means the sockets will have + // flushed all outbound publishings prior to returning. It's important to + // block on Close to not lose any publishings. + defer conn.Close() + + c, err := conn.Channel() + if err != nil { + log.Fatalf("channel.open: %s", err) + } + + // We declare our topology on both the publisher and consumer to ensure they + // are the same. This is part of AMQP being a programmable messaging model. + // + // See the Channel.Consume example for the complimentary declare. + err = c.ExchangeDeclare("logs", "topic", true, false, false, false, nil) + if err != nil { + log.Fatalf("exchange.declare: %v", err) + } + + // Prepare this message to be persistent. Your publishing requirements may + // be different. + msg := amqp.Publishing{ + DeliveryMode: amqp.Persistent, + Timestamp: time.Now(), + ContentType: "text/plain", + Body: []byte("Go Go AMQP!"), + } + + // This is not a mandatory delivery, so it will be dropped if there are no + // queues bound to the logs exchange. + err = c.Publish("logs", "info", false, false, msg) + if err != nil { + // Since publish is asynchronous this can happen if the network connection + // is reset or if the server has run out of resources. + log.Fatalf("basic.publish: %v", err) + } +} + +func publishAllTheThings(conn *amqp.Connection) { + // ... 
snarf snarf, barf barf +} + +func ExampleConnection_NotifyBlocked() { + // Simply logs when the server throttles the TCP connection for publishers + + // Test this by tuning your server to have a low memory watermark: + // rabbitmqctl set_vm_memory_high_watermark 0.00000001 + + conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/") + if err != nil { + log.Fatalf("connection.open: %s", err) + } + defer conn.Close() + + blockings := conn.NotifyBlocked(make(chan amqp.Blocking)) + go func() { + for b := range blockings { + if b.Active { + log.Printf("TCP blocked: %q", b.Reason) + } else { + log.Printf("TCP unblocked") + } + } + }() + + // Your application domain channel setup publishings + publishAllTheThings(conn) +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/fuzz.go b/Godeps/_workspace/src/github.com/streadway/amqp/fuzz.go new file mode 100644 index 000000000..bf7c7689b --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/fuzz.go @@ -0,0 +1,16 @@ +// +build gofuzz +package amqp + +import "bytes" + +func Fuzz(data []byte) int { + r := reader{bytes.NewReader(data)} + frame, err := r.ReadFrame() + if err != nil { + if frame != nil { + panic("frame is not nil") + } + return 0 + } + return 1 +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/gen.sh b/Godeps/_workspace/src/github.com/streadway/amqp/gen.sh new file mode 100644 index 000000000..d46e19bd8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/gen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/integration_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/integration_test.go new file mode 100644 index 000000000..ec839f221 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/integration_test.go @@ -0,0 +1,1796 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +// +build integration + +package amqp + +import ( + "bytes" + devrand "crypto/rand" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "math/rand" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "testing/quick" + "time" +) + +func TestIntegrationOpenClose(t *testing.T) { + if c := integrationConnection(t, "open-close"); c != nil { + t.Logf("have connection, calling connection close") + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } + t.Logf("connection close OK") + } +} + +func TestIntegrationOpenCloseChannel(t *testing.T) { + if c := integrationConnection(t, "channel"); c != nil { + defer c.Close() + + if _, err := c.Channel(); err != nil { + t.Errorf("Channel could not be opened: %s", err) + } + } +} + +func TestIntegrationOpenConfig(t *testing.T) { + config := Config{} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Fatalf("expected to dial with config %+v integration server: %s", config, err) + } + + if _, err := c.Channel(); err != nil { + t.Fatalf("expected to open channel: %s", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } +} + +func TestIntegrationOpenConfigWithNetDial(t *testing.T) { + config := Config{Dial: net.Dial} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + + if _, err := c.Channel(); err != nil { + t.Fatalf("expected to open channel: %s", err) + } + + if err := c.Close(); err != nil { + t.Fatalf("connection close: %s", err) + } +} + +func TestIntegrationLocalAddr(t *testing.T) { + config := Config{} + + c, err := DialConfig(integrationURLFromEnv(), config) + defer c.Close() + if err != nil { + t.Errorf("expected to dial with config %+v 
integration server: %s", config, err) + } + + a := c.LocalAddr() + _, portString, err := net.SplitHostPort(a.String()) + if err != nil { + t.Errorf("expected to get a local network address with config %+v integration server: %s", config, a.String()) + } + + port, err := strconv.Atoi(portString) + if err != nil { + t.Errorf("expected to get a TCP port number with config %+v integration server: %s", config, err) + } + t.Logf("Connected to port %d\n", port) +} + +// https://github.com/streadway/amqp/issues/94 +func TestExchangePassiveOnMissingExchangeShouldError(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel 1: %s", err) + } + defer ch.Close() + + if err := ch.ExchangeDeclarePassive( + "test-integration-missing-passive-exchange", + "direct", // type + false, // duration (note: is durable) + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err == nil { + t.Fatal("ExchangeDeclarePassive of a missing exchange should return error") + } + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationExchangeDeclarePassiveOnDeclaredShouldNotError(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + exchange := "test-integration-decalred-passive-exchange" + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + defer ch.Close() + + if err := ch.ExchangeDeclare( + exchange, // name + "direct", // type + false, // durable + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + + if err := ch.ExchangeDeclarePassive( + exchange, // name + "direct", // type + false, // durable + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("ExchangeDeclarePassive on a declared exchange should not error, got: %q", err) + } + 
} +} + +func TestIntegrationExchange(t *testing.T) { + c := integrationConnection(t, "exch") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchange := "test-integration-exchange" + + if err := channel.ExchangeDeclare( + exchange, // name + "direct", // type + false, // duration + true, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + t.Logf("declare exchange OK") + + if err := channel.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("delete exchange: %s", err) + } + t.Logf("delete exchange OK") + + if err := channel.Close(); err != nil { + t.Fatalf("close channel: %s", err) + } + t.Logf("close channel OK") + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationQueueDeclarePassiveOnMissingExchangeShouldError(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclarePassive( + "test-integration-missing-passive-queue", // name + false, // duration (note: not durable) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err == nil { + t.Fatal("QueueDeclarePassive of a missing queue should error") + } + } +} + +// https://github.com/streadway/amqp/issues/94 +func TestIntegrationPassiveQueue(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + name := "test-integration-declared-passive-queue" + + ch, err := c.Channel() + if err != nil { + t.Fatalf("create channel1: %s", err) + } + defer ch.Close() + + if _, err := ch.QueueDeclare( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue declare: 
%s", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + false, // durable + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue should not error, got: %q", err) + } + + if _, err := ch.QueueDeclarePassive( + name, // name + true, // durable (note: differs) + true, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("QueueDeclarePassive on declared queue with different flags should error") + } + } +} + +func TestIntegrationBasicQueueOperations(t *testing.T) { + c := integrationConnection(t, "queue") + if c != nil { + defer c.Close() + + channel, err := c.Channel() + if err != nil { + t.Fatalf("create channel: %s", err) + } + t.Logf("create channel OK") + + exchangeName := "test-basic-ops-exchange" + queueName := "test-basic-ops-queue" + + deleteQueueFirstOptions := []bool{true, false} + for _, deleteQueueFirst := range deleteQueueFirstOptions { + + if err := channel.ExchangeDeclare( + exchangeName, // name + "direct", // type + true, // duration (note: is durable) + false, // auto-delete + false, // internal + false, // nowait + nil, // args + ); err != nil { + t.Fatalf("declare exchange: %s", err) + } + t.Logf("declare exchange OK") + + if _, err := channel.QueueDeclare( + queueName, // name + true, // duration (note: durable) + false, // auto-delete + false, // exclusive + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue declare: %s", err) + } + t.Logf("declare queue OK") + + if err := channel.QueueBind( + queueName, // name + "", // routingKey + exchangeName, // sourceExchange + false, // noWait + nil, // arguments + ); err != nil { + t.Fatalf("queue bind: %s", err) + } + t.Logf("queue bind OK") + + if deleteQueueFirst { + if _, err := channel.QueueDelete( + queueName, // name + false, // ifUnused (false=be aggressive) + false, // ifEmpty (false=be aggressive) + false, // 
noWait + ); err != nil { + t.Fatalf("delete queue (first): %s", err) + } + t.Logf("delete queue (first) OK") + + if err := channel.ExchangeDelete(exchangeName, false, false); err != nil { + t.Fatalf("delete exchange (after delete queue): %s", err) + } + t.Logf("delete exchange (after delete queue) OK") + + } else { // deleteExchangeFirst + if err := channel.ExchangeDelete(exchangeName, false, false); err != nil { + t.Fatalf("delete exchange (first): %s", err) + } + t.Logf("delete exchange (first) OK") + + if _, err := channel.QueueInspect(queueName); err != nil { + t.Fatalf("inspect queue state after deleting exchange: %s", err) + } + t.Logf("queue properly remains after exchange is deleted") + + if _, err := channel.QueueDelete( + queueName, + false, // ifUnused + false, // ifEmpty + false, // noWait + ); err != nil { + t.Fatalf("delete queue (after delete exchange): %s", err) + } + t.Logf("delete queue (after delete exchange) OK") + } + } + + if err := channel.Close(); err != nil { + t.Fatalf("close channel: %s", err) + } + t.Logf("close channel OK") + } +} + +func TestIntegrationConnectionNegotiatesMaxChannels(t *testing.T) { + config := Config{ChannelMax: 0} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + if want, got := defaultChannelMax, c.Config.ChannelMax; want != got { + t.Fatalf("expected connection to negotiate uint16 (%d) channels, got: %d", want, got) + } +} + +func TestIntegrationConnectionNegotiatesClientMaxChannels(t *testing.T) { + config := Config{ChannelMax: 16} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + if want, got := config.ChannelMax, c.Config.ChannelMax; want != got { + t.Fatalf("expected client specified channel limit after handshake %d, got: %d", want, got) + } +} + 
+func TestIntegrationChannelIDsExhausted(t *testing.T) { + config := Config{ChannelMax: 16} + + c, err := DialConfig(integrationURLFromEnv(), config) + if err != nil { + t.Errorf("expected to dial with config %+v integration server: %s", config, err) + } + defer c.Close() + + for i := 1; i <= c.Config.ChannelMax; i++ { + if _, err := c.Channel(); err != nil { + t.Fatalf("expected allocating all channel ids to succed, failed on %d with %v", i, err) + } + } + + if _, err := c.Channel(); err != ErrChannelMax { + t.Fatalf("expected allocating all channels to produce the client side error %#v, got: %#v", ErrChannelMax, err) + } +} + +func TestIntegrationChannelClosing(t *testing.T) { + c := integrationConnection(t, "closings") + if c != nil { + defer c.Close() + + // This function is run on every channel after it is successfully + // opened. It can do something to verify something. It should be + // quick; many channels may be opened! + f := func(t *testing.T, c *Channel) { + return + } + + // open and close + channel, err := c.Channel() + if err != nil { + t.Fatalf("basic create channel: %s", err) + } + t.Logf("basic create channel OK") + + if err := channel.Close(); err != nil { + t.Fatalf("basic close channel: %s", err) + } + t.Logf("basic close channel OK") + + // deferred close + signal := make(chan bool) + go func() { + channel, err := c.Channel() + if err != nil { + t.Fatalf("second create channel: %s", err) + } + t.Logf("second create channel OK") + + <-signal // a bit of synchronization + f(t, channel) + + defer func() { + if err := channel.Close(); err != nil { + t.Fatalf("deferred close channel: %s", err) + } + t.Logf("deferred close channel OK") + signal <- true + }() + }() + signal <- true + select { + case <-signal: + t.Logf("(got close signal OK)") + break + case <-time.After(250 * time.Millisecond): + t.Fatalf("deferred close: timeout") + } + + // multiple channels + for _, n := range []int{2, 4, 8, 16, 32, 64, 128, 256} { + channels := make([]*Channel, 
n) + for i := 0; i < n; i++ { + var err error + if channels[i], err = c.Channel(); err != nil { + t.Fatalf("create channel %d/%d: %s", i+1, n, err) + } + } + f(t, channel) + for i, channel := range channels { + if err := channel.Close(); err != nil { + t.Fatalf("close channel %d/%d: %s", i+1, n, err) + } + } + t.Logf("created/closed %d channels OK", n) + } + + } +} + +func TestIntegrationMeaningfulChannelErrors(t *testing.T) { + c := integrationConnection(t, "pub") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.channel.error" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + _, err = ch.QueueDeclare(queue, true, false, false, false, nil) + if err == nil { + t.Fatalf("Expected error, got nil") + } + + e, ok := err.(*Error) + if !ok { + t.Fatalf("Expected type Error response, got %T", err) + } + + if e.Code != PreconditionFailed { + t.Fatalf("Expected PreconditionFailed, got: %+v", e) + } + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != ErrClosed { + t.Fatalf("Expected channel to be closed, got: %T", err) + } + } +} + +// https://github.com/streadway/amqp/issues/6 +func TestIntegrationNonBlockingClose(t *testing.T) { + c := integrationConnection(t, "#6") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Fatalf("Could not create channel") + } + + queue := "test.integration.blocking.close" + + _, err = ch.QueueDeclare(queue, false, true, false, false, nil) + if err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + // Simulate a consumer + go func() { + for _ = range msgs { + t.Logf("Oh my, received message on an empty queue") + } + }() + + succeed := make(chan bool) + + go func() { + if err = ch.Close(); 
err != nil { + t.Fatalf("Close produced an error when it shouldn't") + } + succeed <- true + }() + + select { + case <-succeed: + break + case <-time.After(1 * time.Second): + t.Fatalf("Close timed out after 1s") + } + } +} + +func TestIntegrationPublishConsume(t *testing.T) { + queue := "test.integration.publish.consume" + + c1 := integrationConnection(t, "pub") + c2 := integrationConnection(t, "sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, _ := c1.Channel() + sub, _ := c2.Channel() + + pub.QueueDeclare(queue, false, true, false, false, nil) + sub.QueueDeclare(queue, false, true, false, false, nil) + defer pub.QueueDelete(queue, false, false, false) + + messages, _ := sub.Consume(queue, "", false, false, false, false, nil) + + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")}) + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")}) + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 3")}) + + assertConsumeBody(t, messages, []byte("pub 1")) + assertConsumeBody(t, messages, []byte("pub 2")) + assertConsumeBody(t, messages, []byte("pub 3")) + } +} + +func TestIntegrationConsumeFlow(t *testing.T) { + queue := "test.integration.consumer-flow" + + c1 := integrationConnection(t, "pub-flow") + c2 := integrationConnection(t, "sub-flow") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, _ := c1.Channel() + sub, _ := c2.Channel() + + pub.QueueDeclare(queue, false, true, false, false, nil) + sub.QueueDeclare(queue, false, true, false, false, nil) + defer pub.QueueDelete(queue, false, false, false) + + sub.Qos(1, 0, false) + + messages, _ := sub.Consume(queue, "", false, false, false, false, nil) + + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 1")}) + pub.Publish("", queue, false, false, Publishing{Body: []byte("pub 2")}) + + msg := assertConsumeBody(t, messages, []byte("pub 1")) + + if err := sub.Flow(false); err.(*Error).Code == 
NotImplemented { + t.Log("flow control is not supported on this version of rabbitmq") + return + } + + msg.Ack(false) + + select { + case <-messages: + t.Fatalf("message was delivered when flow was not active") + default: + } + + sub.Flow(true) + + msg = assertConsumeBody(t, messages, []byte("pub 2")) + msg.Ack(false) + } +} + +func TestIntegrationRecoverNotImplemented(t *testing.T) { + queue := "test.recover" + + if c, ch := integrationQueue(t, queue); c != nil { + if product, ok := c.Properties["product"]; ok && product.(string) == "RabbitMQ" { + defer c.Close() + + err := ch.Recover(false) + + if ex, ok := err.(*Error); !ok || ex.Code != 540 { + t.Fatalf("Expected NOT IMPLEMENTED got: %v", ex) + } + } + } +} + +// This test is driven by a private API to simulate the server sending a channelFlow message +func TestIntegrationPublishFlow(t *testing.T) { + // TODO - no idea how to test without affecting the server or mucking internal APIs + // i'd like to make sure the RW lock can be held by multiple publisher threads + // and that multiple channelFlow messages do not block the dispatch thread +} + +func TestIntegrationConsumeCancel(t *testing.T) { + queue := "test.integration.consume-cancel" + + c := integrationConnection(t, "pub") + + if c != nil { + defer c.Close() + + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + defer ch.QueueDelete(queue, false, false, false) + + messages, _ := ch.Consume(queue, "integration-tag", false, false, false, false, nil) + + ch.Publish("", queue, false, false, Publishing{Body: []byte("1")}) + + assertConsumeBody(t, messages, []byte("1")) + + err := ch.Cancel("integration-tag", false) + if err != nil { + t.Fatalf("error cancelling the consumer: %v", err) + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("2")}) + + select { + case <-time.After(100 * time.Millisecond): + t.Fatalf("Timeout on Close") + case _, ok := <-messages: + if ok { + t.Fatalf("Extra message on consumer when 
consumer should have been closed") + } + } + } +} + +func (c *Connection) Generate(r *rand.Rand, _ int) reflect.Value { + urlStr := os.Getenv("AMQP_URL") + if urlStr == "" { + return reflect.ValueOf(nil) + } + + conn, err := Dial(urlStr) + if err != nil { + return reflect.ValueOf(nil) + } + + return reflect.ValueOf(conn) +} + +func (c Publishing) Generate(r *rand.Rand, _ int) reflect.Value { + var ok bool + var t reflect.Value + + p := Publishing{} + //p.DeliveryMode = uint8(r.Intn(3)) + //p.Priority = uint8(r.Intn(8)) + + if r.Intn(2) > 0 { + p.ContentType = "application/octet-stream" + } + + if r.Intn(2) > 0 { + p.ContentEncoding = "gzip" + } + + if r.Intn(2) > 0 { + p.CorrelationId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.ReplyTo = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.MessageId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Type = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.AppId = fmt.Sprintf("%d", r.Int()) + } + + if r.Intn(2) > 0 { + p.Timestamp = time.Unix(r.Int63(), r.Int63()) + } + + if t, ok = quick.Value(reflect.TypeOf(p.Body), r); ok { + p.Body = t.Bytes() + } + + return reflect.ValueOf(p) +} + +func TestQuickPublishOnly(t *testing.T) { + if c := integrationConnection(t, "quick"); c != nil { + defer c.Close() + pub, err := c.Channel() + queue := "test-publish" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer pub.QueueDelete(queue, false, false, false) + + quick.Check(func(msg Publishing) bool { + return pub.Publish("", queue, false, false, msg) == nil + }, nil) + } +} + +func TestPublishEmptyBody(t *testing.T) { + c := integrationConnection(t, "empty") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBody" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, 
nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if len(msg.Body) != 0 { + t.Errorf("Received non empty body") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestPublishEmptyBodyWithHeadersIssue67(t *testing.T) { + c := integrationConnection(t, "issue67") + if c != nil { + defer c.Close() + + ch, err := c.Channel() + if err != nil { + t.Errorf("Failed to create channel") + return + } + + queue := "test-TestPublishEmptyBodyWithHeaders" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + messages, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + headers := Table{ + "ham": "spam", + } + + err = ch.Publish("", queue, false, false, Publishing{Headers: headers}) + if err != nil { + t.Fatalf("Could not publish") + } + + select { + case msg := <-messages: + if msg.Headers["ham"] == nil { + t.Fatalf("Headers aren't sent") + } + if msg.Headers["ham"] != "spam" { + t.Fatalf("Headers are wrong") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestQuickPublishConsumeOnly(t *testing.T) { + c1 := integrationConnection(t, "quick-pub") + c2 := integrationConnection(t, "quick-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "TestPublishConsumeOnly" + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + if _, err = sub.QueueDeclare(queue, false, true, false, false, 
nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + defer sub.QueueDelete(queue, false, false, false) + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + quick.CheckEqual( + func(msg Publishing) []byte { + empty := Publishing{Body: msg.Body} + if pub.Publish("", queue, false, false, empty) != nil { + return []byte{'X'} + } + return msg.Body + }, + func(msg Publishing) []byte { + out := <-ch + out.Ack(false) + return out.Body + }, + nil) + } +} + +func TestQuickPublishConsumeBigBody(t *testing.T) { + c1 := integrationConnection(t, "big-pub") + c2 := integrationConnection(t, "big-sub") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + sub, err := c2.Channel() + + queue := "test-pubsub" + + if _, err = sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + ch, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Errorf("Could not sub: %s", err) + } + + fixture := Publishing{ + Body: make([]byte, 1e4+1000), + } + + if _, err = pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Errorf("Failed to declare: %s", err) + return + } + + err = pub.Publish("", queue, false, false, fixture) + if err != nil { + t.Errorf("Could not publish big body") + } + + select { + case msg := <-ch: + if bytes.Compare(msg.Body, fixture.Body) != 0 { + t.Errorf("Consumed big body didn't match") + } + case <-time.After(200 * time.Millisecond): + t.Errorf("Timeout on receive") + } + } +} + +func TestIntegrationGetOk(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + msg, ok, err := ch.Get(queue, 
false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message did not find the message") + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message") + } + } +} + +func TestIntegrationGetEmpty(t *testing.T) { + if c := integrationConnection(t, "getok"); c != nil { + defer c.Close() + + queue := "test.get-ok" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if !ok { + t.Fatalf("Get on a queued message retrieved a message when it shouldn't have") + } + } +} + +func TestIntegrationTxCommit(t *testing.T) { + if c := integrationConnection(t, "txcommit"); c != nil { + defer c.Close() + + queue := "test.tx.commit" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxCommit(); err != nil { + t.Fatalf("tx.commit failed") + } + + msg, ok, err := ch.Get(queue, false) + + if err != nil || !ok { + t.Fatalf("Failed get: %v", err) + } + + if string(msg.Body) != "ok" { + t.Fatalf("Get did not get the correct message from the transaction") + } + } +} + +func TestIntegrationTxRollback(t *testing.T) { + if c := integrationConnection(t, "txrollback"); c != nil { + defer c.Close() + + queue := "test.tx.rollback" + ch, _ := c.Channel() + + ch.QueueDeclare(queue, false, true, false, false, nil) + + if err := ch.Tx(); err != nil { + t.Fatalf("tx.select failed") + } + + ch.Publish("", queue, false, false, Publishing{Body: []byte("ok")}) + + if err := ch.TxRollback(); err != nil { + t.Fatalf("tx.rollback failed") + } + + _, ok, err := ch.Get(queue, false) + + if err != nil { + t.Fatalf("Failed get: %v", err) + } + + if ok { + t.Fatalf("message was published when it should have been rolled 
back") + } + } +} + +func TestIntegrationReturn(t *testing.T) { + if c, ch := integrationQueue(t, "return"); c != nil { + defer c.Close() + + ret := make(chan Return, 1) + + ch.NotifyReturn(ret) + + // mandatory publish to an exchange without a binding should be returned + ch.Publish("", "return-without-binding", true, false, Publishing{Body: []byte("mandatory")}) + + select { + case res := <-ret: + if string(res.Body) != "mandatory" { + t.Fatalf("expected return of the same message") + } + + if res.ReplyCode != NoRoute { + t.Fatalf("expected no consumers reply code on the Return result, got: %v", res.ReplyCode) + } + + case <-time.After(200 * time.Millisecond): + t.Fatalf("no return was received within 200ms") + } + } +} + +func TestIntegrationCancel(t *testing.T) { + queue := "cancel" + consumerTag := "test.cancel" + + if c, ch := integrationQueue(t, queue); c != nil { + defer c.Close() + + cancels := ch.NotifyCancel(make(chan string, 1)) + + go func() { + if _, err := ch.Consume(queue, consumerTag, false, false, false, false, nil); err != nil { + t.Fatalf("cannot consume from %q to test NotifyCancel: %v", queue, err) + } + if _, err := ch.QueueDelete(queue, false, false, false); err != nil { + t.Fatalf("cannot delete integration queue: %v", err) + } + }() + + select { + case tag := <-cancels: + if want, got := consumerTag, tag; want != got { + t.Fatalf("expected to be notified of deleted queue with consumer tag, got: %q", got) + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected to be notified of deleted queue with 200ms") + } + } +} + +func TestIntegrationConfirm(t *testing.T) { + if c, ch := integrationQueue(t, "confirm"); c != nil { + defer c.Close() + + confirms := ch.NotifyPublish(make(chan Confirmation, 1)) + + if err := ch.Confirm(false); err != nil { + t.Fatalf("could not confirm") + } + + ch.Publish("", "confirm", false, false, Publishing{Body: []byte("confirm")}) + + select { + case confirmed := <-confirms: + if confirmed.DeliveryTag 
!= 1 { + t.Fatalf("expected ack starting with delivery tag of 1") + } + case <-time.After(200 * time.Millisecond): + t.Fatalf("no ack was received within 200ms") + } + } +} + +// https://github.com/streadway/amqp/issues/61 +func TestRoundTripAllFieldValueTypes61(t *testing.T) { + if conn := integrationConnection(t, "issue61"); conn != nil { + defer conn.Close() + timestamp := time.Unix(100000000, 0) + + headers := Table{ + "A": []interface{}{ + []interface{}{"nested array", int32(3)}, + Decimal{2, 1}, + Table{"S": "nested table in array"}, + int32(2 << 20), + string("array string"), + timestamp, + nil, + byte(2), + float64(2.64), + float32(2.32), + int64(2 << 60), + int16(2 << 10), + bool(true), + []byte{'b', '2'}, + }, + "D": Decimal{1, 1}, + "F": Table{"S": "nested table in table"}, + "I": int32(1 << 20), + "S": string("string"), + "T": timestamp, + "V": nil, + "b": byte(1), + "d": float64(1.64), + "f": float32(1.32), + "l": int64(1 << 60), + "s": int16(1 << 10), + "t": bool(true), + "x": []byte{'b', '1'}, + } + + queue := "test.issue61-roundtrip" + ch, _ := conn.Channel() + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Could not declare") + } + + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Could not consume") + } + + err = ch.Publish("", queue, false, false, Publishing{Body: []byte("ignored"), Headers: headers}) + if err != nil { + t.Fatalf("Could not publish: %v", err) + } + + msg, ok := <-msgs + + if !ok { + t.Fatalf("Channel closed prematurely likely due to publish exception") + } + + for k, v := range headers { + if !reflect.DeepEqual(v, msg.Headers[k]) { + t.Errorf("Round trip header not the same for key %q: expected: %#v, got %#v", k, v, msg.Headers[k]) + } + } + } +} + +// Declares a queue with the x-message-ttl extension to exercise integer +// serialization. 
+// +// Relates to https://github.com/streadway/amqp/issues/60 +// +func TestDeclareArgsXMessageTTL(t *testing.T) { + if conn := integrationConnection(t, "declareTTL"); conn != nil { + defer conn.Close() + + ch, _ := conn.Channel() + args := Table{"x-message-ttl": int32(9000000)} + + // should not drop the connection + if _, err := ch.QueueDeclare("declareWithTTL", false, true, false, false, args); err != nil { + t.Fatalf("cannot declare with TTL: got: %v", err) + } + } +} + +// Sets up the topology where rejected messages will be forwarded +// to a fanout exchange, with a single queue bound. +// +// Relates to https://github.com/streadway/amqp/issues/56 +// +func TestDeclareArgsRejectToDeadLetterQueue(t *testing.T) { + if conn := integrationConnection(t, "declareArgs"); conn != nil { + defer conn.Close() + + ex, q := "declareArgs", "declareArgs-deliveries" + dlex, dlq := ex+"-dead-letter", q+"-dead-letter" + + ch, _ := conn.Channel() + + if err := ch.ExchangeDeclare(ex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", ex, err) + } + + if err := ch.ExchangeDeclare(dlex, "fanout", false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlex, err) + } + + if _, err := ch.QueueDeclare(dlq, false, true, false, false, nil); err != nil { + t.Fatalf("cannot declare %v: got: %v", dlq, err) + } + + if err := ch.QueueBind(dlq, "#", dlex, false, nil); err != nil { + t.Fatalf("cannot bind %v to %v: got: %v", dlq, dlex, err) + } + + if _, err := ch.QueueDeclare(q, false, true, false, false, Table{ + "x-dead-letter-exchange": dlex, + }); err != nil { + t.Fatalf("cannot declare %v with dlq %v: got: %v", q, dlex, err) + } + + if err := ch.QueueBind(q, "#", ex, false, nil); err != nil { + t.Fatalf("cannot bind %v: got: %v", ex, err) + } + + fails, err := ch.Consume(q, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("cannot consume %v: got: %v", q, err) + } + + // Reject everything 
consumed + go func() { + for d := range fails { + d.Reject(false) + } + }() + + // Publish the 'poison' + if err := ch.Publish(ex, q, true, false, Publishing{Body: []byte("ignored")}); err != nil { + t.Fatalf("publishing failed") + } + + // spin-get until message arrives on the dead-letter queue with a + // synchronous parse to exercise the array field (x-death) set by the + // server relating to issue-56 + for i := 0; i < 10; i++ { + d, got, err := ch.Get(dlq, false) + if !got && err == nil { + continue + } else if err != nil { + t.Fatalf("expected success in parsing reject, got: %v", err) + } else { + // pass if we've parsed an array + if v, ok := d.Headers["x-death"]; ok { + if _, ok := v.([]interface{}); ok { + return + } + } + t.Fatalf("array field x-death expected in the headers, got: %v (%T)", d.Headers, d.Headers["x-death"]) + } + } + + t.Fatalf("expectd dead-letter after 10 get attempts") + } +} + +// https://github.com/streadway/amqp/issues/48 +func TestDeadlockConsumerIssue48(t *testing.T) { + if conn := integrationConnection(t, "issue48"); conn != nil { + defer conn.Close() + + deadline := make(chan bool) + go func() { + select { + case <-time.After(5 * time.Second): + panic("expected to receive 2 deliveries while in an RPC, got a deadlock") + case <-deadline: + // pass + } + }() + + ch, err := conn.Channel() + if err != nil { + t.Fatalf("got error on channel.open: %v", err) + } + + queue := "test-issue48" + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("expected to declare a queue: %v", err) + } + + if err := ch.Confirm(false); err != nil { + t.Fatalf("got error on confirm: %v", err) + } + + confirms := ch.NotifyPublish(make(chan Confirmation, 2)) + + for i := 0; i < cap(confirms); i++ { + // Fill the queue with some new or remaining publishings + ch.Publish("", queue, false, false, Publishing{Body: []byte("")}) + } + + for i := 0; i < cap(confirms); i++ { + // Wait for them to land on the queue so 
they'll be delivered on consume + <-confirms + } + + // Consuming should send them all on the wire + msgs, err := ch.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop one off the chan, the other is on the wire + <-msgs + + // Opening a new channel (any RPC) while another delivery is on the wire + if _, err := conn.Channel(); err != nil { + t.Fatalf("got error on consume: %v", err) + } + + // We pop the next off the chan + <-msgs + + deadline <- true + } +} + +// https://github.com/streadway/amqp/issues/46 +func TestRepeatedChannelExceptionWithPublishAndMaxProcsIssue46(t *testing.T) { + conn := integrationConnection(t, "issue46") + if conn != nil { + for i := 0; i < 100; i++ { + ch, err := conn.Channel() + if err != nil { + t.Fatalf("expected error only on publish, got error on channel.open: %v", err) + } + + for j := 0; j < 10; j++ { + err = ch.Publish("not-existing-exchange", "some-key", false, false, Publishing{Body: []byte("some-data")}) + if err, ok := err.(Error); ok { + if err.Code != 504 { + t.Fatalf("expected channel only exception, got: %v", err) + } + } + } + } + } +} + +// https://github.com/streadway/amqp/issues/43 +func TestChannelExceptionWithCloseIssue43(t *testing.T) { + conn := integrationConnection(t, "issue43") + if conn != nil { + go func() { + for err := range conn.NotifyClose(make(chan *Error)) { + t.Log(err.Error()) + } + }() + + c1, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c1.NotifyClose(make(chan *Error)) { + t.Log("Channel1 Close: " + err.Error()) + } + }() + + c2, err := conn.Channel() + if err != nil { + panic(err) + } + + go func() { + for err := range c2.NotifyClose(make(chan *Error)) { + t.Log("Channel2 Close: " + err.Error()) + } + }() + + // Cause an asynchronous channel exception causing the server + // to send a "channel.close" method either before or after the next + // asynchronous method. 
+ err = c1.Publish("nonexisting-exchange", "", false, false, Publishing{}) + if err != nil { + panic(err) + } + + // Receive or send the channel close method, the channel shuts down + // but this expects a channel.close-ok to be received. + c1.Close() + + // This ensures that the 2nd channel is unaffected by the channel exception + // on channel 1. + err = c2.ExchangeDeclare("test-channel-still-exists", "direct", false, true, false, false, nil) + if err != nil { + panic(err) + } + } +} + +// https://github.com/streadway/amqp/issues/7 +func TestCorruptedMessageIssue7(t *testing.T) { + messageCount := 1024 + + c1 := integrationConnection(t, "") + c2 := integrationConnection(t, "") + + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + pub, err := c1.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + sub, err := c2.Channel() + if err != nil { + t.Fatalf("Cannot create Channel") + } + + queue := "test-corrupted-message-regression" + + if _, err := pub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + if _, err := sub.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("Cannot declare") + } + + msgs, err := sub.Consume(queue, "", false, false, false, false, nil) + if err != nil { + t.Fatalf("Cannot consume") + } + + for i := 0; i < messageCount; i++ { + err := pub.Publish("", queue, false, false, Publishing{ + Body: generateCrc32Random(7 * i), + }) + + if err != nil { + t.Fatalf("Failed to publish") + } + } + + for i := 0; i < messageCount; i++ { + select { + case msg := <-msgs: + assertMessageCrc32(t, msg.Body, fmt.Sprintf("missed match at %d", i)) + case <-time.After(200 * time.Millisecond): + t.Fatalf("Timeout on recv") + } + } + } +} + +// https://github.com/streadway/amqp/issues/136 +func TestChannelCounterShouldNotPanicIssue136(t *testing.T) { + if c := integrationConnection(t, "issue136"); c != nil { + defer c.Close() + var wg sync.WaitGroup + + 
// exceeds 65535 channels + for i := 0; i < 8; i++ { + wg.Add(1) + go func(i int) { + for j := 0; j < 10000; j++ { + ch, err := c.Channel() + if err != nil { + t.Fatalf("failed to create channel %d:%d, got: %v", i, j, err) + } + if err := ch.Close(); err != nil { + t.Fatalf("failed to close channel %d:%d, got: %v", i, j, err) + } + } + wg.Done() + }(i) + } + wg.Wait() + } +} + +func TestExchangeDeclarePrecondition(t *testing.T) { + c1 := integrationConnection(t, "exchange-double-declare") + c2 := integrationConnection(t, "exchange-double-declare-cleanup") + if c1 != nil && c2 != nil { + defer c1.Close() + defer c2.Close() + + ch, err := c1.Channel() + if err != nil { + t.Fatalf("Create channel") + } + + exchange := "test-mismatched-redeclare" + + err = ch.ExchangeDeclare( + exchange, + "direct", // exchangeType + false, // durable + true, // auto-delete + false, // internal + false, // noWait + nil, // arguments + ) + if err != nil { + t.Fatalf("Could not initially declare exchange") + } + + err = ch.ExchangeDeclare( + exchange, + "direct", + true, // different durability + true, + false, + false, + nil, + ) + + if err == nil { + t.Fatalf("Expected to fail a redeclare with different durability, didn't receive an error") + } + + if err, ok := err.(Error); ok { + if err.Code != PreconditionFailed { + t.Fatalf("Expected precondition error") + } + if !err.Recover { + t.Fatalf("Expected to be able to recover") + } + } + + ch2, _ := c2.Channel() + if err = ch2.ExchangeDelete(exchange, false, false); err != nil { + t.Fatalf("Could not delete exchange: %v", err) + } + } +} + +func TestRabbitMQQueueTTLGet(t *testing.T) { + if c := integrationRabbitMQ(t, "ttl"); c != nil { + defer c.Close() + + queue := "test.rabbitmq-message-ttl" + channel, err := c.Channel() + if err != nil { + t.Fatalf("channel: %v", err) + } + + if _, err = channel.QueueDeclare( + queue, + false, + true, + false, + false, + Table{"x-message-ttl": int32(100)}, // in ms + ); err != nil { + t.Fatalf("queue 
declare: %s", err) + } + + channel.Publish("", queue, false, false, Publishing{Body: []byte("ttl")}) + + time.Sleep(200 * time.Millisecond) + + _, ok, err := channel.Get(queue, false) + + if ok { + t.Fatalf("Expected the message to expire in 100ms, it didn't expire after 200ms") + } + + if err != nil { + t.Fatalf("Failed to get on ttl queue") + } + } +} + +func TestRabbitMQQueueNackMultipleRequeue(t *testing.T) { + if c := integrationRabbitMQ(t, "nack"); c != nil { + defer c.Close() + + if c.isCapable("basic.nack") { + queue := "test.rabbitmq-basic-nack" + channel, err := c.Channel() + if err != nil { + t.Fatalf("channel: %v", err) + } + + if _, err = channel.QueueDeclare(queue, false, true, false, false, nil); err != nil { + t.Fatalf("queue declare: %s", err) + } + + channel.Publish("", queue, false, false, Publishing{Body: []byte("1")}) + channel.Publish("", queue, false, false, Publishing{Body: []byte("2")}) + + m1, ok, err := channel.Get(queue, false) + if !ok || err != nil || m1.Body[0] != '1' { + t.Fatalf("could not get message %v", m1) + } + + m2, ok, err := channel.Get(queue, false) + if !ok || err != nil || m2.Body[0] != '2' { + t.Fatalf("could not get message %v", m2) + } + + m2.Nack(true, true) + + m1, ok, err = channel.Get(queue, false) + if !ok || err != nil || m1.Body[0] != '1' { + t.Fatalf("could not get message %v", m1) + } + + m2, ok, err = channel.Get(queue, false) + if !ok || err != nil || m2.Body[0] != '2' { + t.Fatalf("could not get message %v", m2) + } + } + } +} + +/* + * Support for integration tests + */ + +func integrationURLFromEnv() string { + url := os.Getenv("AMQP_URL") + if url == "" { + url = "amqp://" + } + return url +} + +func loggedConnection(t *testing.T, conn *Connection, name string) *Connection { + if name != "" { + conn.conn = &logIO{t, name, conn.conn} + } + return conn +} + +// Returns a conneciton to the AMQP if the AMQP_URL environment +// variable is set and a connnection can be established. 
+func integrationConnection(t *testing.T, name string) *Connection { + conn, err := Dial(integrationURLFromEnv()) + if err != nil { + t.Errorf("dial integration server: %s", err) + return nil + } + return loggedConnection(t, conn, name) +} + +// Returns a connection, channel and delcares a queue when the AMQP_URL is in the environment +func integrationQueue(t *testing.T, name string) (*Connection, *Channel) { + if conn := integrationConnection(t, name); conn != nil { + if channel, err := conn.Channel(); err == nil { + if _, err = channel.QueueDeclare(name, false, true, false, false, nil); err == nil { + return conn, channel + } + } + } + return nil, nil +} + +// Delegates to integrationConnection and only returns a connection if the +// product is RabbitMQ +func integrationRabbitMQ(t *testing.T, name string) *Connection { + if conn := integrationConnection(t, "connect"); conn != nil { + if server, ok := conn.Properties["product"]; ok && server == "RabbitMQ" { + return conn + } + } + + return nil +} + +func assertConsumeBody(t *testing.T, messages <-chan Delivery, want []byte) (msg *Delivery) { + select { + case got := <-messages: + if bytes.Compare(want, got.Body) != 0 { + t.Fatalf("Message body does not match want: %v, got: %v, for: %+v", want, got.Body, got) + } + msg = &got + case <-time.After(200 * time.Millisecond): + t.Fatalf("Timeout waiting for %v", want) + } + + return msg +} + +// Pulls out the CRC and verifies the remaining content against the CRC +func assertMessageCrc32(t *testing.T, msg []byte, assert string) { + size := binary.BigEndian.Uint32(msg[:4]) + + crc := crc32.NewIEEE() + crc.Write(msg[8:]) + + if binary.BigEndian.Uint32(msg[4:8]) != crc.Sum32() { + t.Fatalf("Message does not match CRC: %s", assert) + } + + if int(size) != len(msg)-8 { + t.Fatalf("Message does not match size, should=%d, is=%d: %s", size, len(msg)-8, assert) + } +} + +// Creates a random body size with a leading 32-bit CRC in network byte order +// that verifies the remaining 
slice +func generateCrc32Random(size int) []byte { + msg := make([]byte, size+8) + if _, err := io.ReadFull(devrand.Reader, msg); err != nil { + panic(err) + } + + crc := crc32.NewIEEE() + crc.Write(msg[8:]) + + binary.BigEndian.PutUint32(msg[0:4], uint32(size)) + binary.BigEndian.PutUint32(msg[4:8], crc.Sum32()) + + return msg +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/read.go b/Godeps/_workspace/src/github.com/streadway/amqp/read.go new file mode 100644 index 000000000..74e90ef8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/read.go @@ -0,0 +1,447 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + methodFrame + PropertiesFrame + bodyFrame + heartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. 
+*/ +func (me *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(me.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = me.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = me.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = me.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = me.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(me.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + 
return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + + case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are 
long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var size uint32 + var err error + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + lim := &io.LimitedReader{R: r, N: int64(size)} + arr := make([]interface{}, 0) + var field interface{} + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (me *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(me.r, binary.BigEndian, &flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if 
hf.Properties.ContentType, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(me.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(me.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(me.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(me.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(me.r); err != nil { + return + } + } + + return hf, nil +} + +func (me *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: make([]byte, size), + } + + if _, err = io.ReadFull(me.r, bf.Body); err 
!= nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (me *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/read_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/read_test.go new file mode 100644 index 000000000..bb0e30f02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/read_test.go @@ -0,0 +1,22 @@ +package amqp + +import ( + "strings" + "testing" +) + +func TestGoFuzzCrashers(t *testing.T) { + testData := []string{ + "\b000000", + "\x02\x16\x10�[��\t\xbdui�" + "\x10\x01\x00\xff\xbf\xef\xbfサn\x99\x00\x10r", + "\x0300\x00\x00\x00\x040000", + } + + for idx, testStr := range testData { + r := reader{strings.NewReader(testStr)} + frame, err := r.ReadFrame() + if err != nil && frame != nil { + t.Errorf("%d. 
frame is not nil: %#v err = %v", idx, frame, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/reconnect_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/reconnect_test.go new file mode 100644 index 000000000..5a06cb7ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/reconnect_test.go @@ -0,0 +1,113 @@ +package amqp_test + +import ( + "fmt" + "github.com/streadway/amqp" + "os" +) + +// Every connection should declare the topology they expect +func setup(url, queue string) (*amqp.Connection, *amqp.Channel, error) { + conn, err := amqp.Dial(url) + if err != nil { + return nil, nil, err + } + + ch, err := conn.Channel() + if err != nil { + return nil, nil, err + } + + if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil { + return nil, nil, err + } + + return conn, ch, nil +} + +func consume(url, queue string) (*amqp.Connection, <-chan amqp.Delivery, error) { + conn, ch, err := setup(url, queue) + if err != nil { + return nil, nil, err + } + + // Indicate we only want 1 message to acknowledge at a time. + if err := ch.Qos(1, 0, false); err != nil { + return nil, nil, err + } + + // Exclusive consumer + deliveries, err := ch.Consume(queue, "", false, true, false, false, nil) + + return conn, deliveries, err +} + +func ExampleConnection_reconnect() { + if url := os.Getenv("AMQP_URL"); url != "" { + queue := "example.reconnect" + + // The connection/channel for publishing to interleave the ingress messages + // between reconnects, shares the same topology as the consumer. If we rather + // sent all messages up front, the first consumer would receive every message. + // We would rather show how the messages are not lost between reconnects. 
+ _, pub, err := setup(url, queue) + if err != nil { + fmt.Println("err publisher setup:", err) + return + } + + // Purge the queue from the publisher side to establish initial state + if _, err := pub.QueuePurge(queue, false); err != nil { + fmt.Println("err purge:", err) + return + } + + // Reconnect simulation, should be for { ... } in production + for i := 1; i <= 3; i++ { + fmt.Println("connect") + + conn, deliveries, err := consume(url, queue) + if err != nil { + fmt.Println("err consume:", err) + return + } + + // Simulate a producer on a different connection showing that consumers + // continue where they were left off after each reconnect. + if err := pub.Publish("", queue, false, false, amqp.Publishing{ + Body: []byte(fmt.Sprintf("%d", i)), + }); err != nil { + fmt.Println("err publish:", err) + return + } + + // Simulates a consumer that when the range finishes, will setup a new + // session and begin ranging over the deliveries again. + for msg := range deliveries { + fmt.Println(string(msg.Body)) + msg.Ack(false) + + // Simulate an error like a server restart, loss of route or operator + // intervention that results in the connection terminating + go conn.Close() + } + } + } else { + // pass with expected output when not running in an integration + // environment. + fmt.Println("connect") + fmt.Println("1") + fmt.Println("connect") + fmt.Println("2") + fmt.Println("connect") + fmt.Println("3") + } + + // Output: + // connect + // 1 + // connect + // 2 + // connect + // 3 +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/return.go b/Godeps/_workspace/src/github.com/streadway/amqp/return.go new file mode 100644 index 000000000..dfebd635d --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. +type Return struct { + ReplyCode uint16 // reason + ReplyText string // description + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implemention use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + + Body []byte +} + +func newReturn(msg basicReturn) *Return { + props, body := msg.getContent() + + return &Return{ + ReplyCode: msg.ReplyCode, + ReplyText: msg.ReplyText, + Exchange: msg.Exchange, + RoutingKey: msg.RoutingKey, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } +} diff --git 
a/Godeps/_workspace/src/github.com/streadway/amqp/shared_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/shared_test.go new file mode 100644 index 000000000..2e4715fa0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/shared_test.go @@ -0,0 +1,71 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "encoding/hex" + "io" + "testing" +) + +type pipe struct { + r *io.PipeReader + w *io.PipeWriter +} + +func (p pipe) Read(b []byte) (int, error) { + return p.r.Read(b) +} + +func (p pipe) Write(b []byte) (int, error) { + return p.w.Write(b) +} + +func (p pipe) Close() error { + p.r.Close() + p.w.Close() + return nil +} + +type logIO struct { + t *testing.T + prefix string + proxy io.ReadWriteCloser +} + +func (me *logIO) Read(p []byte) (n int, err error) { + me.t.Logf("%s reading %d\n", me.prefix, len(p)) + n, err = me.proxy.Read(p) + if err != nil { + me.t.Logf("%s read %x: %v\n", me.prefix, p[0:n], err) + } else { + me.t.Logf("%s read:\n%s\n", me.prefix, hex.Dump(p[0:n])) + //fmt.Printf("%s read:\n%s\n", me.prefix, hex.Dump(p[0:n])) + } + return +} + +func (me *logIO) Write(p []byte) (n int, err error) { + me.t.Logf("%s writing %d\n", me.prefix, len(p)) + n, err = me.proxy.Write(p) + if err != nil { + me.t.Logf("%s write %d, %x: %v\n", me.prefix, len(p), p[0:n], err) + } else { + me.t.Logf("%s write %d:\n%s", me.prefix, len(p), hex.Dump(p[0:n])) + //fmt.Printf("%s write %d:\n%s", me.prefix, len(p), hex.Dump(p[0:n])) + } + return +} + +func (me *logIO) Close() (err error) { + err = me.proxy.Close() + if err != nil { + me.t.Logf("%s close : %v\n", me.prefix, err) + } else { + me.t.Logf("%s close\n", me.prefix, err) + } + return +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml 
b/Godeps/_workspace/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml new file mode 100644 index 000000000..fbddb93a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/spec/amqp0-9-1.stripped.extended.xml @@ -0,0 +1,537 @@ + + + + + + + + + + + + + + + + + Errata: Section 1.2 ought to define an exception 312 "No route", which used to + exist in 0-9 and is what RabbitMQ sends back with 'basic.return' when a + 'mandatory' message cannot be delivered to any queue. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/spec/gen.go b/Godeps/_workspace/src/github.com/streadway/amqp/spec/gen.go new file mode 100644 index 000000000..1861b9ebb --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/spec/gen.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "regexp" + "strings" + "text/template" +) + +var ( + ErrUnknownType = errors.New("Unknown field type in gen") + ErrUnknownDomain = errors.New("Unknown domain type in gen") +) + +var amqpTypeToNative = map[string]string{ + "bit": "bool", + "octet": "byte", + "shortshort": "uint8", + "short": "uint16", + "long": "uint32", + "longlong": "uint64", + "timestamp": "time.Time", + "table": "Table", + "shortstr": "string", + "longstr": "string", +} + +type Rule struct { + Name string `xml:"name,attr"` + Docs []string `xml:"doc"` +} + +type Doc struct { + Type string `xml:"type,attr"` + Body string `xml:",innerxml"` +} + +type Chassis struct { + Name string `xml:"name,attr"` + Implement string `xml:"implement,attr"` +} + +type Assert struct { + Check string `xml:"check,attr"` + Value string `xml:"value,attr"` + Method string `xml:"method,attr"` +} + +type Field struct { + Name string `xml:"name,attr"` + Domain string `xml:"domain,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Reserved bool `xml:"reserved,attr"` + Docs []Doc `xml:"doc"` + Asserts []Assert `xml:"assert"` +} + +type Response struct { + Name string `xml:"name,attr"` +} + +type Method struct { + Name string `xml:"name,attr"` + Response Response `xml:"response"` + Synchronous bool `xml:"synchronous,attr"` + Content bool `xml:"content,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Rules []Rule `xml:"rule"` + Fields []Field `xml:"field"` + Chassis []Chassis `xml:"chassis"` +} + +type Class struct { + Name string `xml:"name,attr"` + Handler string `xml:"handler,attr"` + Index string `xml:"index,attr"` + Label string `xml:"label,attr"` + Docs []Doc `xml:"doc"` + Methods []Method `xml:"method"` + Chassis []Chassis `xml:"chassis"` +} + +type Domain 
struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Label string `xml:"label,attr"` + Rules []Rule `xml:"rule"` + Docs []Doc `xml:"doc"` +} + +type Constant struct { + Name string `xml:"name,attr"` + Value int `xml:"value,attr"` + Class string `xml:"class,attr"` + Doc string `xml:"doc"` +} + +type Amqp struct { + Major int `xml:"major,attr"` + Minor int `xml:"minor,attr"` + Port int `xml:"port,attr"` + Comment string `xml:"comment,attr"` + + Constants []Constant `xml:"constant"` + Domains []Domain `xml:"domain"` + Classes []Class `xml:"class"` +} + +type renderer struct { + Root Amqp + bitcounter int +} + +type fieldset struct { + AmqpType string + NativeType string + Fields []Field + *renderer +} + +var ( + helpers = template.FuncMap{ + "public": public, + "private": private, + "clean": clean, + } + + packageTemplate = template.Must(template.New("package").Funcs(helpers).Parse(` + // Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // Source code and contact info at http://github.com/streadway/amqp + + /* GENERATED FILE - DO NOT EDIT */ + /* Rebuild from the spec/gen.go tool */ + + {{with .Root}} + package amqp + + import ( + "fmt" + "encoding/binary" + "io" + ) + + // Error codes that can be sent from the server during a connection or + // channel exception or used by the client to indicate a class of error like + // ErrCredentials. The text of the error is likely more interesting than + // these constants. 
+ const ( + {{range $c := .Constants}} + {{if $c.IsError}}{{.Name | public}}{{else}}{{.Name | private}}{{end}} = {{.Value}}{{end}} + ) + + func isSoftExceptionCode(code int) bool { + switch code { + {{range $c := .Constants}} {{if $c.IsSoftError}} case {{$c.Value}}: + return true + {{end}}{{end}} + } + return false + } + + {{range .Classes}} + {{$class := .}} + {{range .Methods}} + {{$method := .}} + {{$struct := $.StructName $class.Name $method.Name}} + {{if .Docs}}/* {{range .Docs}} {{.Body | clean}} {{end}} */{{end}} + type {{$struct}} struct { + {{range .Fields}} + {{$.FieldName .}} {{$.FieldType . | $.NativeType}} {{if .Label}}// {{.Label}}{{end}}{{end}} + {{if .Content}}Properties properties + Body []byte{{end}} + } + + func (me *{{$struct}}) id() (uint16, uint16) { + return {{$class.Index}}, {{$method.Index}} + } + + func (me *{{$struct}}) wait() (bool) { + return {{.Synchronous}}{{if $.HasField "NoWait" .}} && !me.NoWait{{end}} + } + + {{if .Content}} + func (me *{{$struct}}) getContent() (properties, []byte) { + return me.Properties, me.Body + } + + func (me *{{$struct}}) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body + } + {{end}} + func (me *{{$struct}}) write(w io.Writer) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "enc-"}} + return + } + + func (me *{{$struct}}) read(r io.Reader) (err error) { + {{if $.HasType "bit" $method}}var bits byte{{end}} + {{.Fields | $.Fieldsets | $.Partial "dec-"}} + return + } + {{end}} + {{end}} + + func (me *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame { + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + {{range .Classes}} + {{$class := .}} + case {{.Index}}: // {{.Name}} + switch mf.MethodId { + 
{{range .Methods}} + case {{.Index}}: // {{$class.Name}} {{.Name}} + //fmt.Println("NextMethod: class:{{$class.Index}} method:{{.Index}}") + method := &{{$.StructName $class.Name .Name}}{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + {{end}} + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil + } + {{end}} + + {{define "enc-bit"}} + {{range $off, $field := .Fields}} + if me.{{$field | $.FieldName}} { bits |= 1 << {{$off}} } + {{end}} + if err = binary.Write(w, binary.BigEndian, bits); err != nil { return } + {{end}} + {{define "enc-octet"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortshort"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-short"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-long"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longlong"}} + {{range .Fields}} if err = binary.Write(w, binary.BigEndian, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-timestamp"}} + {{range .Fields}} if err = writeTimestamp(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-shortstr"}} + {{range .Fields}} if err = writeShortstr(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-longstr"}} + {{range .Fields}} if err = writeLongstr(w, me.{{. 
| $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "enc-table"}} + {{range .Fields}} if err = writeTable(w, me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + + {{define "dec-bit"}} + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + {{range $off, $field := .Fields}} me.{{$field | $.FieldName}} = (bits & (1 << {{$off}}) > 0) + {{end}} + {{end}} + {{define "dec-octet"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortshort"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-short"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-long"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-longlong"}} + {{range .Fields}} if err = binary.Read(r, binary.BigEndian, &me.{{. | $.FieldName}}); err != nil { return } + {{end}} + {{end}} + {{define "dec-timestamp"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readTimestamp(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-shortstr"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readShortstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-longstr"}} + {{range .Fields}} if me.{{. | $.FieldName}}, err = readLongstr(r); err != nil { return } + {{end}} + {{end}} + {{define "dec-table"}} + {{range .Fields}} if me.{{. 
| $.FieldName}}, err = readTable(r); err != nil { return } + {{end}} + {{end}} + + `)) +) + +func (me *Constant) IsError() bool { + return strings.Contains(me.Class, "error") +} + +func (me *Constant) IsSoftError() bool { + return me.Class == "soft-error" +} + +func (me *renderer) Partial(prefix string, fields []fieldset) (s string, err error) { + var buf bytes.Buffer + for _, set := range fields { + name := prefix + set.AmqpType + t := packageTemplate.Lookup(name) + if t == nil { + return "", errors.New(fmt.Sprintf("Missing template: %s", name)) + } + if err = t.Execute(&buf, set); err != nil { + return + } + } + return string(buf.Bytes()), nil +} + +// Groups the fields so that the right encoder/decoder can be called +func (me *renderer) Fieldsets(fields []Field) (f []fieldset, err error) { + if len(fields) > 0 { + for _, field := range fields { + cur := fieldset{} + cur.AmqpType, err = me.FieldType(field) + if err != nil { + return + } + + cur.NativeType, err = me.NativeType(cur.AmqpType) + if err != nil { + return + } + cur.Fields = append(cur.Fields, field) + f = append(f, cur) + } + + i, j := 0, 1 + for j < len(f) { + if f[i].AmqpType == f[j].AmqpType { + f[i].Fields = append(f[i].Fields, f[j].Fields...) 
+ } else { + i++ + f[i] = f[j] + } + j++ + } + return f[:i+1], nil + } + + return +} + +func (me *renderer) HasType(typ string, method Method) bool { + for _, f := range method.Fields { + name, _ := me.FieldType(f) + if name == typ { + return true + } + } + return false +} + +func (me *renderer) HasField(field string, method Method) bool { + for _, f := range method.Fields { + name := me.FieldName(f) + if name == field { + return true + } + } + return false +} + +func (me *renderer) Domain(field Field) (domain Domain, err error) { + for _, domain = range me.Root.Domains { + if field.Domain == domain.Name { + return + } + } + return domain, nil + //return domain, ErrUnknownDomain +} + +func (me *renderer) FieldName(field Field) (t string) { + t = public(field.Name) + + if field.Reserved { + t = strings.ToLower(t) + } + + return +} + +func (me *renderer) FieldType(field Field) (t string, err error) { + t = field.Type + + if t == "" { + var domain Domain + domain, err = me.Domain(field) + if err != nil { + return "", err + } + t = domain.Type + } + + return +} + +func (me *renderer) NativeType(amqpType string) (t string, err error) { + if t, ok := amqpTypeToNative[amqpType]; ok { + return t, nil + } + return "", ErrUnknownType +} + +func (me *renderer) Tag(d Domain) string { + label := "`" + + label += `domain:"` + d.Name + `"` + + if len(d.Type) > 0 { + label += `,type:"` + d.Type + `"` + } + + label += "`" + + return label +} + +func (me *renderer) StructName(parts ...string) string { + return parts[0] + public(parts[1:]...) +} + +func clean(body string) (res string) { + return strings.Replace(body, "\r", "", -1) +} + +func private(parts ...string) string { + return export(regexp.MustCompile(`[-_]\w`), parts...) +} + +func public(parts ...string) string { + return export(regexp.MustCompile(`^\w|[-_]\w`), parts...) 
+} + +func export(delim *regexp.Regexp, parts ...string) (res string) { + for _, in := range parts { + + res += delim.ReplaceAllStringFunc(in, func(match string) string { + switch len(match) { + case 1: + return strings.ToUpper(match) + case 2: + return strings.ToUpper(match[1:]) + } + panic("unreachable") + }) + } + + return +} + +func main() { + var r renderer + + spec, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatalln("Please pass spec on stdin", err) + } + + err = xml.Unmarshal(spec, &r.Root) + + if err != nil { + log.Fatalln("Could not parse XML:", err) + } + + if err = packageTemplate.Execute(os.Stdout, &r); err != nil { + log.Fatalln("Generate error: ", err) + } +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/spec091.go b/Godeps/_workspace/src/github.com/streadway/amqp/spec091.go new file mode 100644 index 000000000..a95380303 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. 
+const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (me *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (me *connectionStart) wait() bool { + return true +} + +func (me *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.VersionMinor); err != nil { + return + } + + if err = writeTable(w, me.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, me.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, me.Locales); err != nil { + return + } + + return +} + +func (me *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.VersionMinor); err != nil { + return + } + + if me.ServerProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanisms, err = readLongstr(r); err != nil { + return + } + if me.Locales, err = readLongstr(r); err != nil { + return + } + + 
return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (me *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (me *connectionStartOk) wait() bool { + return true +} + +func (me *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, me.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, me.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + if err = writeShortstr(w, me.Locale); err != nil { + return + } + + return +} + +func (me *connectionStartOk) read(r io.Reader) (err error) { + + if me.ClientProperties, err = readTable(r); err != nil { + return + } + + if me.Mechanism, err = readShortstr(r); err != nil { + return + } + + if me.Response, err = readLongstr(r); err != nil { + return + } + + if me.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (me *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (me *connectionSecure) wait() bool { + return true +} + +func (me *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Challenge); err != nil { + return + } + + return +} + +func (me *connectionSecure) read(r io.Reader) (err error) { + + if me.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (me *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (me *connectionSecureOk) wait() bool { + return true +} + +func (me *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.Response); err != nil { + return + } + + return +} + +func (me *connectionSecureOk) read(r io.Reader) (err error) { + + if me.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + 
ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (me *connectionTune) wait() bool { + return true +} + +func (me *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + return + } + + return +} + +func (me *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (me *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (me *connectionTuneOk) wait() bool { + return true +} + +func (me *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.Heartbeat); err != nil { + return + } + + return +} + +func (me *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { + VirtualHost string + reserved1 string + reserved2 bool +} + +func (me *connectionOpen) id() (uint16, uint16) { + return 10, 40 +} + +func (me *connectionOpen) wait() bool { + return 
true +} + +func (me *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + if me.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if me.VirtualHost, err = readShortstr(r); err != nil { + return + } + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (me *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (me *connectionOpenOk) wait() bool { + return true +} + +func (me *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *connectionOpenOk) read(r io.Reader) (err error) { + + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (me *connectionClose) id() (uint16, uint16) { + return 10, 50 +} + +func (me *connectionClose) wait() bool { + return true +} + +func (me *connectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil { + return + } + + return +} + +func (me *connectionClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } 
+ + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil { + return + } + + return +} + +type connectionCloseOk struct { +} + +func (me *connectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (me *connectionCloseOk) wait() bool { + return true +} + +func (me *connectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (me *connectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (me *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (me *connectionBlocked) wait() bool { + return false +} + +func (me *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.Reason); err != nil { + return + } + + return +} + +func (me *connectionBlocked) read(r io.Reader) (err error) { + + if me.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (me *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (me *connectionUnblocked) wait() bool { + return false +} + +func (me *connectionUnblocked) write(w io.Writer) (err error) { + + return +} + +func (me *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (me *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (me *channelOpen) wait() bool { + return true +} + +func (me *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *channelOpen) read(r io.Reader) (err error) { + + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type channelOpenOk struct { + reserved1 string +} + +func (me *channelOpenOk) id() (uint16, uint16) { + return 20, 
11 +} + +func (me *channelOpenOk) wait() bool { + return true +} + +func (me *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *channelOpenOk) read(r io.Reader) (err error) { + + if me.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (me *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (me *channelFlow) wait() bool { + return true +} + +func (me *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if me.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (me *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (me *channelFlowOk) wait() bool { + return false +} + +func (me *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if me.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (me *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (me *channelClose) wait() bool { + return true +} + +func (me *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + + if err = 
binary.Write(w, binary.BigEndian, me.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.MethodId); err != nil { + return + } + + return +} + +func (me *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } + + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &me.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (me *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (me *channelCloseOk) wait() bool { + return true +} + +func (me *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (me *channelCloseOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (me *exchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (me *exchangeDeclare) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.Type); err != nil { + return + } + + if me.Passive { + bits |= 1 << 0 + } + + if me.Durable { + bits |= 1 << 1 + } + + if me.AutoDelete { + bits |= 1 << 2 + } + + if me.Internal { + bits |= 1 << 3 + } + + if me.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, 
binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Passive = (bits&(1<<0) > 0) + me.Durable = (bits&(1<<1) > 0) + me.AutoDelete = (bits&(1<<2) > 0) + me.Internal = (bits&(1<<3) > 0) + me.NoWait = (bits&(1<<4) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeDeclareOk struct { +} + +func (me *exchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (me *exchangeDeclareOk) wait() bool { + return true +} + +func (me *exchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (me *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (me *exchangeDelete) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + + if me.IfUnused { + bits |= 1 << 0 + } + + if me.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.IfUnused = (bits&(1<<0) > 0) + me.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (me *exchangeDeleteOk) id() (uint16, uint16) { 
+ return 40, 21 +} + +func (me *exchangeDeleteOk) wait() bool { + return true +} + +func (me *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (me *exchangeBind) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Destination); err != nil { + return + } + if err = writeShortstr(w, me.Source); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Destination, err = readShortstr(r); err != nil { + return + } + if me.Source, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (me *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (me *exchangeBindOk) wait() bool { + return true +} + +func (me *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind 
struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (me *exchangeUnbind) wait() bool { + return true && !me.NoWait +} + +func (me *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Destination); err != nil { + return + } + if err = writeShortstr(w, me.Source); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Destination, err = readShortstr(r); err != nil { + return + } + if me.Source, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (me *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (me *exchangeUnbindOk) wait() bool { + return true +} + +func (me *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (me *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + NoWait bool + Arguments Table +} + +func (me *queueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (me *queueDeclare) 
wait() bool { + return true && !me.NoWait +} + +func (me *queueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.Passive { + bits |= 1 << 0 + } + + if me.Durable { + bits |= 1 << 1 + } + + if me.Exclusive { + bits |= 1 << 2 + } + + if me.AutoDelete { + bits |= 1 << 3 + } + + if me.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Passive = (bits&(1<<0) > 0) + me.Durable = (bits&(1<<1) > 0) + me.Exclusive = (bits&(1<<2) > 0) + me.AutoDelete = (bits&(1<<3) > 0) + me.NoWait = (bits&(1<<4) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (me *queueDeclareOk) id() (uint16, uint16) { + return 50, 11 +} + +func (me *queueDeclareOk) wait() bool { + return true +} + +func (me *queueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, me.ConsumerCount); err != nil { + return + } + + return +} + +func (me *queueDeclareOk) read(r io.Reader) (err error) { + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + if err = 
binary.Read(r, binary.BigEndian, &me.ConsumerCount); err != nil { + return + } + + return +} + +type queueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (me *queueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (me *queueBind) wait() bool { + return true && !me.NoWait +} + +func (me *queueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueBindOk struct { +} + +func (me *queueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (me *queueBindOk) wait() bool { + return true +} + +func (me *queueBindOk) write(w io.Writer) (err error) { + + return +} + +func (me *queueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (me *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (me 
*queueUnbind) wait() bool { + return true +} + +func (me *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueUnbindOk struct { +} + +func (me *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (me *queueUnbindOk) wait() bool { + return true +} + +func (me *queueUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (me *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (me *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (me *queuePurge) wait() bool { + return true && !me.NoWait +} + +func (me *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = 
readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (me *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (me *queuePurgeOk) wait() bool { + return true +} + +func (me *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (me *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (me *queueDelete) wait() bool { + return true && !me.NoWait +} + +func (me *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.IfUnused { + bits |= 1 << 0 + } + + if me.IfEmpty { + bits |= 1 << 1 + } + + if me.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.IfUnused = (bits&(1<<0) > 0) + me.IfEmpty = (bits&(1<<1) > 0) + me.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (me *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (me *queueDeleteOk) wait() bool { + return true +} + +func (me 
*queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (me *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func (me *basicQos) wait() bool { + return true +} + +func (me *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.PrefetchCount); err != nil { + return + } + + if me.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.PrefetchSize); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (me *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (me *basicQosOk) wait() bool { + return true +} + +func (me *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (me *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type basicConsume struct { + reserved1 uint16 + Queue string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (me *basicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (me *basicConsume) wait() bool { + return true && !me.NoWait +} + +func (me *basicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = 
binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if me.NoLocal { + bits |= 1 << 0 + } + + if me.NoAck { + bits |= 1 << 1 + } + + if me.Exclusive { + bits |= 1 << 2 + } + + if me.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, me.Arguments); err != nil { + return + } + + return +} + +func (me *basicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoLocal = (bits&(1<<0) > 0) + me.NoAck = (bits&(1<<1) > 0) + me.Exclusive = (bits&(1<<2) > 0) + me.NoWait = (bits&(1<<3) > 0) + + if me.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type basicConsumeOk struct { + ConsumerTag string +} + +func (me *basicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (me *basicConsumeOk) wait() bool { + return true +} + +func (me *basicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + return +} + +func (me *basicConsumeOk) read(r io.Reader) (err error) { + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel struct { + ConsumerTag string + NoWait bool +} + +func (me *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (me *basicCancel) wait() bool { + return true && !me.NoWait +} + +func (me *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if me.NoWait { + bits |= 1 
<< 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (me *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (me *basicCancelOk) wait() bool { + return true +} + +func (me *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + return +} + +func (me *basicCancelOk) read(r io.Reader) (err error) { + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties properties + Body []byte +} + +func (me *basicPublish) id() (uint16, uint16) { + return 60, 40 +} + +func (me *basicPublish) wait() bool { + return false +} + +func (me *basicPublish) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicPublish) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if me.Mandatory { + bits |= 1 << 0 + } + + if me.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return 
+ } + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Mandatory = (bits&(1<<0) > 0) + me.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (me *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (me *basicReturn) wait() bool { + return false +} + +func (me *basicReturn) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicReturn) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, me.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, me.ReplyText); err != nil { + return + } + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + return +} + +func (me *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &me.ReplyCode); err != nil { + return + } + + if me.ReplyText, err = readShortstr(r); err != nil { + return + } + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicDeliver struct { + ConsumerTag string + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (me *basicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (me *basicDeliver) wait() bool { + return false +} + +func (me *basicDeliver) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicDeliver) setContent(props properties, body []byte) { + 
me.Properties, me.Body = props, body +} + +func (me *basicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, me.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + return +} + +func (me *basicDeliver) read(r io.Reader) (err error) { + var bits byte + + if me.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Redelivered = (bits&(1<<0) > 0) + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (me *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (me *basicGet) wait() bool { + return true +} + +func (me *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.reserved1); err != nil { + return + } + + if err = writeShortstr(w, me.Queue); err != nil { + return + } + + if me.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.reserved1); err != nil { + return + } + + if me.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + 
DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties properties + Body []byte +} + +func (me *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (me *basicGetOk) wait() bool { + return true +} + +func (me *basicGetOk) getContent() (properties, []byte) { + return me.Properties, me.Body +} + +func (me *basicGetOk) setContent(props properties, body []byte) { + me.Properties, me.Body = props, body +} + +func (me *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, me.Exchange); err != nil { + return + } + if err = writeShortstr(w, me.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, me.MessageCount); err != nil { + return + } + + return +} + +func (me *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Redelivered = (bits&(1<<0) > 0) + + if me.Exchange, err = readShortstr(r); err != nil { + return + } + if me.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &me.MessageCount); err != nil { + return + } + + return +} + +type basicGetEmpty struct { + reserved1 string +} + +func (me *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (me *basicGetEmpty) wait() bool { + return true +} + +func (me *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, me.reserved1); err != nil { + return + } + + return +} + +func (me *basicGetEmpty) read(r io.Reader) (err error) { + + if me.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} 
+ +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (me *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (me *basicAck) wait() bool { + return false +} + +func (me *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (me *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (me *basicReject) wait() bool { + return false +} + +func (me *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (me *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (me *basicRecoverAsync) wait() bool { + return false +} + +func (me *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me 
*basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (me *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (me *basicRecover) wait() bool { + return true +} + +func (me *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if me.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (me *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (me *basicRecoverOk) wait() bool { + return true +} + +func (me *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (me *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (me *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (me *basicNack) wait() bool { + return false +} + +func (me *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, me.DeliveryTag); err != nil { + return + } + + if me.Multiple { + bits |= 1 << 0 + } + + if me.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &me.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Multiple = (bits&(1<<0) > 0) + me.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { 
+} + +func (me *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (me *txSelect) wait() bool { + return true +} + +func (me *txSelect) write(w io.Writer) (err error) { + + return +} + +func (me *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (me *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (me *txSelectOk) wait() bool { + return true +} + +func (me *txSelectOk) write(w io.Writer) (err error) { + + return +} + +func (me *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (me *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (me *txCommit) wait() bool { + return true +} + +func (me *txCommit) write(w io.Writer) (err error) { + + return +} + +func (me *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (me *txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (me *txCommitOk) wait() bool { + return true +} + +func (me *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (me *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (me *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (me *txRollback) wait() bool { + return true +} + +func (me *txRollback) write(w io.Writer) (err error) { + + return +} + +func (me *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct { +} + +func (me *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (me *txRollbackOk) wait() bool { + return true +} + +func (me *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (me *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (me *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (me *confirmSelect) wait() bool { + return true +} + +func (me *confirmSelect) write(w io.Writer) (err error) { + var 
bits byte + + if me.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (me *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + me.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (me *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (me *confirmSelectOk) wait() bool { + return true +} + +func (me *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (me *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (me *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.ClassId); err != nil { + return + } + + if err = binary.Read(me.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(me.r); err != nil { + 
return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + method := &connectionCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 
method:20") + method := &channelFlow{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + //fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 
40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(me.r); err != nil { + return + } + 
mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(me.r); err != nil { + 
return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + 
} + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + case 11: // confirm select-ok + //fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(me.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/tls_test.go 
b/Godeps/_workspace/src/github.com/streadway/amqp/tls_test.go new file mode 100644 index 000000000..a0795b641 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/tls_test.go @@ -0,0 +1,218 @@ +package amqp_test + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "github.com/streadway/amqp" + "io" + "net" + "testing" + "time" +) + +type tlsServer struct { + net.Listener + URL string + Config *tls.Config + Header chan []byte +} + +// Captures the header for each accepted connection +func (s *tlsServer) Serve() { + for { + c, err := s.Accept() + if err != nil { + return + } + + header := make([]byte, 4) + io.ReadFull(c, header) + s.Header <- header + c.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 0, 0}) + c.Close() + } +} + +func tlsConfig() *tls.Config { + cfg := new(tls.Config) + + cfg.ClientCAs = x509.NewCertPool() + cfg.ClientCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey)) + if err != nil { + panic(err) + } + + cfg.Certificates = append(cfg.Certificates, cert) + cfg.ClientAuth = tls.RequireAndVerifyClientCert + + return cfg +} + +func startTlsServer() tlsServer { + cfg := tlsConfig() + + l, err := tls.Listen("tcp", "127.0.0.1:0", cfg) + if err != nil { + panic(err) + } + + s := tlsServer{ + Listener: l, + Config: cfg, + URL: fmt.Sprintf("amqps://%s/", l.Addr().String()), + Header: make(chan []byte, 1), + } + + go s.Serve() + return s +} + +// Tests that the server has handshaked the connection and seen the client +// protocol announcement. Does not nest that the connection.open is successful. 
+func TestTLSHandshake(t *testing.T) { + srv := startTlsServer() + defer srv.Close() + + cfg := new(tls.Config) + cfg.RootCAs = x509.NewCertPool() + cfg.RootCAs.AppendCertsFromPEM([]byte(caCert)) + + cert, _ := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + cfg.Certificates = append(cfg.Certificates, cert) + + _, err := amqp.DialTLS(srv.URL, cfg) + + select { + case <-time.After(10 * time.Millisecond): + t.Fatalf("did not succeed to handshake the TLS connection after 10ms") + case header := <-srv.Header: + if string(header) != "AMQP" { + t.Fatalf("expected to handshake a TLS connection, got err: %v", err) + } + } +} + +const caCert = ` +-----BEGIN CERTIFICATE----- +MIICxjCCAa6gAwIBAgIJANWuMWMQSxvdMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV +BAMTCE15VGVzdENBMB4XDTE0MDEyNzE5NTIyMloXDTI0MDEyNTE5NTIyMlowEzER +MA8GA1UEAxMITXlUZXN0Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDBsIrkW4ob9Z/gzR2/Maa2stbutry6/vvz8eiJwIKIbaHGwqtFOUGiWeKw7H76 +IH3SjTAhNQY2hoKPyH41D36sDJkYBRyHFJTK/6ffvOhpyLnuXJAnoS62eKPSNUAx +5i/lkHj42ESutYAH9qbHCI/gBm9G4WmhGAyA16xzC1n07JObl6KFoY1PqHKl823z +mvF47I24DzemEfjdwC9nAAX/pGYOg9FA9nQv7NnhlsJMxueCx55RNU1ADRoqsbfE +T0CQTOT4ryugGrUp9J4Cwen6YbXZrS6+Kff5SQCAns0Qu8/bwj0DKkuBGLF+Mnwe +mq9bMzyZPUrPM3Gu48ao8YAfAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0P +BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQCBwXGblRxIEOlEP6ANZ1C8AHWyG8lR +CQduFclc0tmyCCz5fnyLK0aGu9LhXXe6/HSKqgs4mJqeqYOojdjkfOme/YdwDzjK +WIf0kRYQHcB6NeyEZwW8C7subTP1Xw6zbAmjvQrtCGvRM+fi3/cs1sSSkd/EoRk4 +7GM9qQl/JIIoCOGncninf2NQm5YSpbit6/mOQD7EhqXsw+bX+IRh3DHC1Apv/PoA +HlDNeM4vjWaBxsmvRSndrIvew1czboFM18oRSSIqAkU7dKZ0SbC11grzmNxMG2aD +f9y8FIG6RK/SEaOZuc+uBGXx7tj7dczpE/2puqYcaVGwcv4kkrC/ZuRm +-----END CERTIFICATE----- +` + +const serverCert = ` +-----BEGIN CERTIFICATE----- +MIIC8zCCAdugAwIBAgIBATANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl +c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC 
+AQ8AMIIBCgKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABo0AwPjAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFIDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHREECDAGhwR/ +AAABMA0GCSqGSIb3DQEBBQUAA4IBAQAE2g+wAFf9Xg5svcnb7+mfseYV16k9l5WG +onrmR3FLsbTxfbr4PZJMHrswPbi2NRk0+ETPUpcv1RP7pUB7wSEvuS1NPGcU92iP +58ycP3dYtLzmuu6BkgToZqwsCU8fC2zM0wt3+ifzPpDMffWWOioVuA3zdM9WPQYz ++Ofajd0XaZwFZS8uTI5WXgObz7Xqfmln4tF3Sq1CTyuJ44qK4p83XOKFq+L04aD0 +d0c8w3YQNUENny/vMP9mDu3FQ3SnDz2GKl1LSjGe2TUnkoMkDfdk4wSzndTz/ecb +QiCPKijwVPWNOWV3NDE2edMxDPxDoKoEm5F4UGfGjxSRnYCIoZLh +-----END CERTIFICATE----- +` + +const serverKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxYAKbeGyg0gP0xwVsZsufzk/SUCtD44Gp3lQYQ9QumQ1IVZu +PmZWwPWrzI93a1Abruz6ZhXaB3jcL5QPAy1N44IiFgVN45CZXBsqkpJe/abzRFOV +DRnHxattPDHdgwML5d3nURKGUM/7+ACj5E4pZEDlM3RIjIKVd+doJsL7n6myO8FE +tIpt4vTz1MFp3F+ntPnHU3BZ/VZ1UjSlFWnCjT0CR0tnXsPmlIaC98HThS8x5zNB +fvvSN+Zln8RWdNLnEVHVdqYtOQ828QbCx8s1HfClGgaVoSDrzz+qQgtZFO4wW264 +2CWkNd8DSJUJ/HlPNXmbXsrRMgvGaL7YUz2yRQIDAQABAoIBAGsyEvcPAGg3DbfE +z5WFp9gPx2TIAOanbL8rnlAAEw4H47qDgfTGcSHsdeHioKuTYGMyZrpP8/YISGJe +l0NfLJ5mfH+9Q0hXrJWMfS/u2DYOjo0wXH8u1fpZEEISwqsgVS3fonSjfFmSea1j +E5GQRvEONBkYbWQuYFgjNqmLPS2r5lKbWCQvc1MB/vvVBwOTiO0ON7m/EkM5RKt9 +cDT5ZhhVjBpdmd9HpVbKTdBj8Q0l5/ZHZUEgZA6FDZEwYxTd9l87Z4YT+5SR0z9t +k8/Z0CHd3x3Rv891t7m66ZJkaOda8NC65/432MQEQwJltmrKnc22dS8yI26rrmpp +g3tcbSUCgYEA5nMXdQKS4vF+Kp10l/HqvGz2sU8qQaWYZQIg7Th3QJPo6N52po/s +nn3UF0P5mT1laeZ5ZQJKx4gnmuPnIZ2ZtJQDyFhIbRPcZ+2hSNSuLYVcrumOC3EP +3OZyFtFE1THO73aFe5e1jEdtoOne3Bds/Hq6NF45fkVdL+M9e8pfXIsCgYEA22W8 +zGjbWyrFOYvKknMQVtHnMx8BJEtsvWRknP6CWAv/8WyeZpE128Pve1m441AQnopS +CuOF5wFK0iUXBFbS3Pe1/1j3em6yfVznuUHqJ7Qc+dNzxVvkTK8jGB6x+vm+M9Hg 
+muHUM726IUxckoSNXbPNAVPIZab1NdSxam7F9m8CgYEAx55QZmIJXJ41XLKxqWC7 +peZ5NpPNlbncrTpPzUzJN94ntXfmrVckbxGt401VayEctMQYyZ9XqUlOjUP3FU5Q +M3S3Zhba/eljVX8o406fZf0MkNLs4QpZ5E6V6x/xEP+pMhKng6yhbVb+JpIPIvUD +yhyBKRWplbB+DRo5Sv685gsCgYA7l5m9h+m1DJv/cnn2Z2yTuHXtC8namuYRV1iA +0ByFX9UINXGc+GpBpCnDPm6ax5+MAJQiQwSW52H0TIDA+/hQbrQvhHHL/o9av8Zt +Kns4h5KrRQUYIUqUjamhnozHV9iS6LnyN87Usv8AlmY6oehoADN53dD702qdUYVT +HH2G3wKBgCdvqyw78FR/n8cUWesTPnxx5HCeWJ1J+2BESnUnPmKZ71CV1H7uweja +vPUxuuuGLKfNx84OKCfRDbtOgMOeyh9T1RmXry6Srz/7/udjlF0qmFiRXfBNAgoR +tNb0+Ri/vY0AHrQ7UnCbl12qPVaqhEXLr+kCGNEPFqpMJPPEeMK0 +-----END RSA PRIVATE KEY----- +` + +const clientCert = ` +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBAjANBgkqhkiG9w0BAQUFADATMREwDwYDVQQDEwhNeVRl +c3RDQTAeFw0xNDAxMjcxOTUyMjNaFw0yNDAxMjUxOTUyMjNaMCUxEjAQBgNVBAMT +CTEyNy4wLjAuMTEPMA0GA1UEChMGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH +B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e +xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQUF +AAOCAQEAhCuBCLznPc4O96hT3P8Fx19L3ltrWbc/pWrx8JjxUaGk8kNmjMjY+/Mt +JBbjUBx2kJwaY0EHMAfw7D1f1wcCeNycx/0dyb0E6xzhmPw5fY15GGNg8rzWwqSY ++i/1iqU0IRkmRHV7XCF+trd2H0Ec+V1Fd/61E2ccJfOL5aSAyWbMCUtWxS3QMnqH +FBfKdVEiY9WNht5hnvsXQBRaNhowJ6Cwa7/1/LZjmhcXiJ0xrc1Hggj3cvS+4vll +Ew+20a0tPKjD/v/2oSQL+qkeYKV4fhCGkaBHCpPlSJrqorb7B6NmPy3nS26ETKE/ +o2UCfZc5g2MU1ENa31kT1iuhKZapsA== +-----END CERTIFICATE----- +` + +const clientKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAu7LMqd+agoH168Bsi0WJ36ulYqDypq+GZPF7uWOo2pE0raKH +B++31/hjnkt6yC5kLKVZZ0EfolBa9q4Cy6swfGaEMafy44ZCRneLnt1azL1N6Kfz ++U0KsOqyQDoMxYJG1gVTEZN19/U/ew2eazcxKyERI3oGCQ4SbpkxBTbfxtAFk49e 
+xIB3obsuMVUrmtXE4FkUkvG7NgpPUgrhp0yxYpj9zruZGzGGT1zNhcarbQ/4i7It +ZMbnv6pqQWtYDgnGX2TDRcEiXGeO+KrzhfpTRLfO3K4np8e8cmTyXM+4lMlWUgma +KrRdu1QXozGqRs47u2prGKGdSQWITpqNVCY8fQIDAQABAoIBAGSEn3hFyEAmCyYi +2b5IEksXaC2GlgxQKb/7Vs/0oCPU6YonZPsKFMFzQx4tu+ZiecEzF8rlJGTPdbdv +fw3FcuTcHeVd1QSmDO4h7UK5tnu40XVMJKsY6CXQun8M13QajYbmORNLjjypOULU +C0fNueYoAj6mhX7p61MRdSAev/5+0+bVQQG/tSVDQzdngvKpaCunOphiB2VW2Aa0 +7aYPOFCoPB2uo0DwUmBB0yfx9x4hXX9ovQI0YFou7bq6iYJ0vlZBvYQ9YrVdxjKL +avcz1N5xM3WFAkZJSVT/Ho5+uTbZx4RrJ8b5T+t2spOKmXyAjwS2rL/XMAh8YRZ1 +u44duoECgYEA4jpK2qshgQ0t49rjVHEDKX5x7ElEZefl0rHZ/2X/uHUDKpKj2fTq +3TQzHquiQ4Aof7OEB9UE3DGrtpvo/j/PYxL5Luu5VR4AIEJm+CA8GYuE96+uIL0Z +M2r3Lux6Bp30Z47Eit2KiY4fhrWs59WB3NHHoFxgzHSVbnuA02gcX2ECgYEA1GZw +iXIVYaK07ED+q/0ObyS5hD1cMhJ7ifSN9BxuG0qUpSigbkTGj09fUDS4Fqsz9dvz +F0P93fZvyia242TIfDUwJEsDQCgHk7SGa4Rx/p/3x/obIEERk7K76Hdg93U5NXhV +NvczvgL0HYxnb+qtumwMgGPzncB4lGcTnRyOfp0CgYBTIsDnYwRI/KLknUf1fCKB +WSpcfwBXwsS+jQVjygQTsUyclI8KResZp1kx6DkVPT+kzj+y8SF8GfTUgq844BJC +gnJ4P8A3+3JoaH6WqKHtcUxICZOgDF36e1CjOdwOGnX6qIipz4hdzJDhXFpSSDAV +CjKmR8x61k0j8NcC2buzgQKBgFr7eo9VwBTvpoJhIPY5UvqHB7S+uAR26FZi3H/J +wdyM6PmKWpaBfXCb9l8cBhMnyP0y94FqzY9L5fz48nSbkkmqWvHg9AaCXySFOuNJ +e68vhOszlnUNimLzOAzPPkkh/JyL7Cy8XXyyNTGHGDPXmg12BTDmH8/eR4iCUuOE +/QD9AoGBALQ/SkvfO3D5+k9e/aTHRuMJ0+PWdLUMTZ39oJQxUx+qj7/xpjDvWTBn +eDmF/wjnIAg+020oXyBYo6plEZfDz3EYJQZ+3kLLEU+O/A7VxCakPYPwCr7N/InL +Ccg/TVSIXxw/6uJnojoAjMIEU45NoP6RMp0mWYYb2OlteEv08Ovp +-----END RSA PRIVATE KEY----- +` diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/types.go b/Godeps/_workspace/src/github.com/streadway/amqp/types.go new file mode 100644 index 000000000..8071bf7cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/types.go @@ -0,0 +1,390 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +var ( + // Errors that this library could return/emit from a channel or connection + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. 
+type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with differnet parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (me Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", me.Code, me.Reason) +} + +// Used by header frames to capture routing and header information +type properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implemention use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. 
Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence. The bits are ordered from most +// high to low - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. +type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. 
+ Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgement of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server succesfully received the publishing +} + +// Decimal matches the AMQP decimal type. Scale is the number of decimal +// digits Scale == 2, Value == 12345, Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. 
+// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type converstion. +// +// RabbitMQ expects int32 for integer values. +// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %t not supported", f) +} + +func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (me tagSet) Len() int { return len(me) } +func (me tagSet) Less(i, j int) bool { return (me)[i] < (me)[j] } +func (me tagSet) Swap(i, j int) { (me)[i], (me)[j] = (me)[j], (me)[i] } +func (me *tagSet) Push(tag interface{}) { *me = append(*me, tag.(uint64)) } +func (me *tagSet) Pop() interface{} { + val := (*me)[len(*me)-1] + *me = (*me)[:len(*me)-1] + return val +} + +type message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + message + getContent() (properties, []byte) + setContent(properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size 
octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. + +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type reader struct { + r io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. 3. Check that the method is allowed in + the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables). The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. 
+*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (me *methodFrame) channel() uint16 { return me.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (me *heartbeatFrame) channel() uint16 { return me.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (me *headerFrame) channel() uint16 { return me.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. 
Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (me *bodyFrame) channel() uint16 { return me.ChannelId } diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/uri.go b/Godeps/_workspace/src/github.com/streadway/amqp/uri.go new file mode 100644 index 000000000..582464db5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/uri.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. +type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. 
+// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + me := defaultURI + + u, err := url.Parse(uri) + if err != nil { + return me, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + me.Scheme = u.Scheme + } else { + return me, errURIScheme + } + + host, port := splitHostPort(u.Host) + + if host != "" { + me.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return me, err + } + me.Port = int(port32) + } else { + me.Port = defaultPort + } + + if u.User != nil { + me.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + me.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + me.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + me.Vhost = u.Path[1:] + } + } else { + me.Vhost = u.Path + } + } + + return me, nil +} + +// Splits host:port, host, [ho:st]:port, or [ho:st]. Unlike net.SplitHostPort +// which splits :port, host:port or [host]:port +// +// Handles hosts that have colons that are in brackets like [::1]:http +func splitHostPort(addr string) (host, port string) { + i := strings.LastIndex(addr, ":") + + if i >= 0 { + host, port = addr[:i], addr[i+1:] + + if len(port) > 0 && port[len(port)-1] == ']' && addr[0] == '[' { + // we've split on an inner colon, the port was missing outside of the + // brackets so use the full addr. 
We could assert that host should not + // contain any colons here + host, port = addr, "" + } + } else { + host = addr + } + + return +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. +func (me URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: me.Username, + Password: me.Password, + } +} + +func (me URI) String() string { + var authority string + + if me.Username != defaultURI.Username || me.Password != defaultURI.Password { + authority += me.Username + + if me.Password != defaultURI.Password { + authority += ":" + me.Password + } + + authority += "@" + } + + authority += me.Host + + if defaultPort, found := schemePorts[me.Scheme]; !found || defaultPort != me.Port { + authority += ":" + strconv.FormatInt(int64(me.Port), 10) + } + + var vhost string + if me.Vhost != defaultURI.Vhost { + vhost = me.Vhost + } + + return fmt.Sprintf("%s://%s/%s", me.Scheme, authority, url.QueryEscape(vhost)) +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/uri_test.go b/Godeps/_workspace/src/github.com/streadway/amqp/uri_test.go new file mode 100644 index 000000000..5d93e0bc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/uri_test.go @@ -0,0 +1,328 @@ +package amqp + +import ( + "testing" +) + +// Test matrix defined on http://www.rabbitmq.com/uri-spec.html +type testURI struct { + url string + username string + password string + host string + port int + vhost string + canon string +} + +var uriTests = []testURI{ + { + url: "amqp://user:pass@host:10000/vhost", + username: "user", + password: "pass", + host: "host", + port: 10000, + vhost: "vhost", + canon: "amqp://user:pass@host:10000/vhost", + }, + + // this fails due to net/url not parsing pct-encoding in host + // testURI{url: "amqp://user%61:%61pass@ho%61st:10000/v%2Fhost", + // username: "usera", + // password: "apass", + // host: "hoast", + // port: 10000, + // vhost: "v/host", + // }, + + { + url: "amqp://", + username: 
defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://localhost/", + }, + + { + url: "amqp://:@/", + username: "", + password: "", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://:@localhost/", + }, + + { + url: "amqp://user@", + username: "user", + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user@localhost/", + }, + + { + url: "amqp://user:pass@", + username: "user", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://user:pass@localhost/", + }, + + { + url: "amqp://guest:pass@", + username: "guest", + password: "pass", + host: defaultURI.Host, + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://guest:pass@localhost/", + }, + + { + url: "amqp://host", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + + { + url: "amqp://:10000", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: 10000, + vhost: defaultURI.Vhost, + canon: "amqp://localhost:10000/", + }, + + { + url: "amqp:///vhost", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: defaultURI.Port, + vhost: "vhost", + canon: "amqp://localhost/vhost", + }, + + { + url: "amqp://host/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/", + canon: "amqp://host/", + }, + + { + url: "amqp://host/%2F%2F", + username: defaultURI.Username, + password: 
defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "//", + canon: "amqp://host/%2F%2F", + }, + + { + url: "amqp://host/%2Fslash%2F", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: defaultURI.Port, + vhost: "/slash/", + canon: "amqp://host/%2Fslash%2F", + }, + + { + url: "amqp://192.168.1.1:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "192.168.1.1", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://192.168.1.1:1000/", + }, + + { + url: "amqp://[::1]", + username: defaultURI.Username, + password: defaultURI.Password, + host: "[::1]", + port: defaultURI.Port, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]/", + }, + + { + url: "amqp://[::1]:1000", + username: defaultURI.Username, + password: defaultURI.Password, + host: "[::1]", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqp://[::1]:1000/", + }, + + { + url: "amqps:///", + username: defaultURI.Username, + password: defaultURI.Password, + host: defaultURI.Host, + port: schemePorts["amqps"], + vhost: defaultURI.Vhost, + canon: "amqps://localhost/", + }, + + { + url: "amqps://host:1000/", + username: defaultURI.Username, + password: defaultURI.Password, + host: "host", + port: 1000, + vhost: defaultURI.Vhost, + canon: "amqps://host:1000/", + }, +} + +func TestURISpec(t *testing.T) { + for _, test := range uriTests { + u, err := ParseURI(test.url) + if err != nil { + t.Fatal("Could not parse spec URI: ", test.url, " err: ", err) + } + + if test.username != u.Username { + t.Error("For: ", test.url, " usernames do not match. want: ", test.username, " got: ", u.Username) + } + + if test.password != u.Password { + t.Error("For: ", test.url, " passwords do not match. want: ", test.password, " got: ", u.Password) + } + + if test.host != u.Host { + t.Error("For: ", test.url, " hosts do not match. 
want: ", test.host, " got: ", u.Host) + } + + if test.port != u.Port { + t.Error("For: ", test.url, " ports do not match. want: ", test.port, " got: ", u.Port) + } + + if test.vhost != u.Vhost { + t.Error("For: ", test.url, " vhosts do not match. want: ", test.vhost, " got: ", u.Vhost) + } + + if test.canon != u.String() { + t.Error("For: ", test.url, " canonical string does not match. want: ", test.canon, " got: ", u.String()) + } + } +} + +func TestURIUnknownScheme(t *testing.T) { + if _, err := ParseURI("http://example.com/"); err == nil { + t.Fatal("Expected error when parsing non-amqp scheme") + } +} + +func TestURIScheme(t *testing.T) { + if _, err := ParseURI("amqp://example.com/"); err != nil { + t.Fatalf("Expected to parse amqp scheme, got %v", err) + } + + if _, err := ParseURI("amqps://example.com/"); err != nil { + t.Fatalf("Expected to parse amqps scheme, got %v", err) + } +} + +func TestURIDefaults(t *testing.T) { + url := "amqp://" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://localhost/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIComplete(t *testing.T) { + url := "amqp://bob:dobbs@foo.bar:5678/private" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != url { + t.Fatal("Defaults not encoded properly want:", url, " got:", uri.String()) + } +} + +func TestURIDefaultPortAmqpNotIncluded(t *testing.T) { + url := "amqp://foo.bar:5672/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqp://foo.bar/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqp(t *testing.T) { + url := "amqp://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5672 { + t.Fatal("Default port not correct for amqp, got:", uri.Port) + } +} + +func 
TestURIDefaultPortAmqpsNotIncludedInString(t *testing.T) { + url := "amqps://foo.bar:5671/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.String() != "amqps://foo.bar/" { + t.Fatal("Defaults not encoded properly got:", uri.String()) + } +} + +func TestURIDefaultPortAmqps(t *testing.T) { + url := "amqps://foo.bar/" + uri, err := ParseURI(url) + if err != nil { + t.Fatal("Could not parse") + } + + if uri.Port != 5671 { + t.Fatal("Default port not correct for amqps, got:", uri.Port) + } +} diff --git a/Godeps/_workspace/src/github.com/streadway/amqp/write.go b/Godeps/_workspace/src/github.com/streadway/amqp/write.go new file mode 100644 index 000000000..d392ca237 --- /dev/null +++ b/Godeps/_workspace/src/github.com/streadway/amqp/write.go @@ -0,0 +1,411 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (me *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(me.w); err != nil { + return + } + + if buf, ok := me.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (me *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if me.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := me.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = me.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, me.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (me *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, 
frameHeartbeat, me.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (me *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, me.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, me.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. + + var mask uint16 + + if len(me.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(me.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if me.Properties.Headers != nil && len(me.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if me.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if me.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(me.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(me.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(me.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(me.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if me.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(me.Properties.Type) > 0 { + mask = mask | flagType + } + if len(me.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(me.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { 
+ if err = writeShortstr(&payload, me.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, me.Properties.ContentEncoding); err != nil { + return + } + } + if hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, me.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, me.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, me.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, me.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = writeShortstr(&payload, me.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, me.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, me.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(me.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, me.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, me.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, me.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, me.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byterange from the full body who's size is declared in the +// Header frame +func (me *bodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, 
me.ChannelId, me.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint8 = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length uint32 = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], 
math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) + + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/outputs/all/all.go b/outputs/all/all.go index 8586174a5..4d1369f69 100644 --- a/outputs/all/all.go +++ b/outputs/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdb/telegraf/outputs/amqp" _ "github.com/influxdb/telegraf/outputs/datadog" _ "github.com/influxdb/telegraf/outputs/influxdb" _ "github.com/influxdb/telegraf/outputs/kafka" diff --git 
a/outputs/amqp/amqp.go b/outputs/amqp/amqp.go new file mode 100644 index 000000000..9a657311c --- /dev/null +++ b/outputs/amqp/amqp.go @@ -0,0 +1,112 @@ +package amqp + +import ( + "fmt" + + "github.com/influxdb/influxdb/client" + "github.com/influxdb/telegraf/outputs" + "github.com/streadway/amqp" +) + +type AMQP struct { + // AMQP brokers to send metrics to + URL string + // AMQP exchange + Exchange string + // Routing key + RoutingKey string + + channel *amqp.Channel +} + +var sampleConfig = ` + # AMQP url + url = "amqp://localhost:5672/influxdb" + # AMQP exchange + exchange = "telegraf" +` + +func (q *AMQP) Connect() error { + connection, err := amqp.Dial(q.URL) + if err != nil { + return err + } + channel, err := connection.Channel() + if err != nil { + return fmt.Errorf("Failed to open a channel: %s", err) + } + + err = channel.ExchangeDeclare( + q.Exchange, // name + "topic", // type + true, // durable + false, // delete when unused + false, // internal + false, // no-wait + nil, // arguments + ) + if err != nil { + return fmt.Errorf("Failed to declare an exchange: %s", err) + } + q.channel = channel + return nil +} + +func (q *AMQP) Close() error { + return q.channel.Close() +} + +func (q *AMQP) SampleConfig() string { + return sampleConfig +} + +func (q *AMQP) Description() string { + return "Configuration for the AMQP server to send metrics to" +} + +func (q *AMQP) Write(bp client.BatchPoints) error { + if len(bp.Points) == 0 { + return nil + } + + for _, p := range bp.Points { + // Combine tags from Point and BatchPoints and grab the resulting + // line-protocol output string to write to Kafka + var value, key string + if p.Raw != "" { + value = p.Raw + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + value = p.MarshalString() + } + + if h, ok := p.Tags["dc"]; ok { + key = h + } + + err := q.channel.Publish( + q.Exchange, // exchange + key, // routing key + false, // 
mandatory + false, // immediate + amqp.Publishing{ + ContentType: "text/plain", + Body: []byte(value), + }) + if err != nil { + return fmt.Errorf("FAILED to send amqp message: %s\n", err) + } + } + return nil +} + +func init() { + outputs.Add("amqp", func() outputs.Output { + return &AMQP{} + }) +} diff --git a/outputs/amqp/amqp_test.go b/outputs/amqp/amqp_test.go new file mode 100644 index 000000000..15781fbc9 --- /dev/null +++ b/outputs/amqp/amqp_test.go @@ -0,0 +1,28 @@ +package amqp + +import ( + "testing" + + "github.com/influxdb/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + var url = "amqp://" + testutil.GetLocalHost() + ":5672/" + q := &AMQP{ + URL: url, + Exchange: "telegraf_test", + } + + // Verify that we can connect to the Kafka broker + err := q.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to the kafka broker + err = q.Write(testutil.MockBatchPoints()) + require.NoError(t, err) +} From 5d280e4d2523f91f95232be8d9209befced8d91e Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Wed, 16 Sep 2015 03:25:56 +0300 Subject: [PATCH 076/125] AMQP output plugin typo fixes and added README and RoutingTag --- outputs/amqp/README.md | 8 ++++++++ outputs/amqp/amqp.go | 15 ++++++++++----- outputs/amqp/amqp_test.go | 4 ++-- scripts/docker-compose.yml | 8 +++++++- 4 files changed, 27 insertions(+), 8 deletions(-) create mode 100644 outputs/amqp/README.md diff --git a/outputs/amqp/README.md b/outputs/amqp/README.md new file mode 100644 index 000000000..e708e3496 --- /dev/null +++ b/outputs/amqp/README.md @@ -0,0 +1,8 @@ +# AMQP Output Plugin + +This plugin writes to an AMQP exchange using a tag, defined in the configuration file +as RoutingTag, as the routing key. + +If RoutingTag is empty, then an empty routing key will be used. 
+ +This plugin doesn't bind the exchange to a queue, so that should be done by the consumer. diff --git a/outputs/amqp/amqp.go b/outputs/amqp/amqp.go index 9a657311c..2d3f1c399 100644 --- a/outputs/amqp/amqp.go +++ b/outputs/amqp/amqp.go @@ -14,7 +14,7 @@ type AMQP struct { // AMQP exchange Exchange string // Routing key - RoutingKey string + RoutingTag string channel *amqp.Channel } @@ -24,6 +24,9 @@ var sampleConfig = ` url = "amqp://localhost:5672/influxdb" # AMQP exchange exchange = "telegraf" + # AMQP tag name used as a routing key + # If there's no tag in a point, empty routing key will be used + routing_tag = "dc" ` func (q *AMQP) Connect() error { @@ -71,7 +74,7 @@ func (q *AMQP) Write(bp client.BatchPoints) error { for _, p := range bp.Points { // Combine tags from Point and BatchPoints and grab the resulting - // line-protocol output string to write to Kafka + // line-protocol output string to write to AMQP var value, key string if p.Raw != "" { value = p.Raw @@ -85,8 +88,10 @@ func (q *AMQP) Write(bp client.BatchPoints) error { value = p.MarshalString() } - if h, ok := p.Tags["dc"]; ok { - key = h + if q.RoutingTag != "" { + if h, ok := p.Tags[q.RoutingTag]; ok { + key = h + } } err := q.channel.Publish( @@ -99,7 +104,7 @@ func (q *AMQP) Write(bp client.BatchPoints) error { Body: []byte(value), }) if err != nil { - return fmt.Errorf("FAILED to send amqp message: %s\n", err) + return fmt.Errorf("FAILED to send amqp message: %s", err) } } return nil diff --git a/outputs/amqp/amqp_test.go b/outputs/amqp/amqp_test.go index 15781fbc9..247801f9e 100644 --- a/outputs/amqp/amqp_test.go +++ b/outputs/amqp/amqp_test.go @@ -18,11 +18,11 @@ func TestConnectAndWrite(t *testing.T) { Exchange: "telegraf_test", } - // Verify that we can connect to the Kafka broker + // Verify that we can connect to the AMQP broker err := q.Connect() require.NoError(t, err) - // Verify that we can successfully write data to the 
amqp broker err = q.Write(testutil.MockBatchPoints()) require.NoError(t, err) } diff --git a/scripts/docker-compose.yml b/scripts/docker-compose.yml index a41cb67f4..bf1a40ef3 100644 --- a/scripts/docker-compose.yml +++ b/scripts/docker-compose.yml @@ -26,8 +26,14 @@ kafka: ADVERTISED_HOST: ADVERTISED_PORT: 9092 +rabbitmq: + image: rabbitmq:3-management + hostname: docker_rabbit + ports: + - "15672:15672" + - "5672:5672" + opentsdb: image: lancope/opentsdb ports: - "24242:4242" - From c843b53c30bb7c015b85eb170c241705371f5f7d Mon Sep 17 00:00:00 2001 From: Roman Plessl Date: Fri, 11 Sep 2015 22:24:53 +0200 Subject: [PATCH 077/125] added docker image unit test with OpenTSDB --- scripts/docker-compose.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/docker-compose.yml b/scripts/docker-compose.yml index bf1a40ef3..c7d360863 100644 --- a/scripts/docker-compose.yml +++ b/scripts/docker-compose.yml @@ -26,6 +26,12 @@ kafka: ADVERTISED_HOST: ADVERTISED_PORT: 9092 + +opentsdb: + image: lancope/opentsdb + ports: + - "24242:4242" + rabbitmq: image: rabbitmq:3-management hostname: docker_rabbit From d979ee55731ec754018dc91bbfa2756db6f54e9e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Sep 2015 12:10:26 -0700 Subject: [PATCH 078/125] AMQP routing tag doc & add routing tag for Kafka closes #200 --- outputs/amqp/amqp.go | 10 +++++----- outputs/kafka/kafka.go | 7 ++++++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/outputs/amqp/amqp.go b/outputs/amqp/amqp.go index 2d3f1c399..070793fdb 100644 --- a/outputs/amqp/amqp.go +++ b/outputs/amqp/amqp.go @@ -13,8 +13,8 @@ type AMQP struct { URL string // AMQP exchange Exchange string - // Routing key - RoutingTag string + // Routing Key Tag + RoutingTag string `toml:"routing_tag"` channel *amqp.Channel } @@ -24,9 +24,9 @@ var sampleConfig = ` url = "amqp://localhost:5672/influxdb" # AMQP exchange exchange = "telegraf" - # AMQP tag name used as a routing key - # If there's no tag in a point, 
empty routing key will be used - routing_tag = "dc" + # Telegraf tag to use as a routing key + # ie, if this tag exists, it's value will be used as the routing key + routing_tag = "host" ` func (q *AMQP) Connect() error { diff --git a/outputs/kafka/kafka.go b/outputs/kafka/kafka.go index 49a729b42..370f1ba95 100644 --- a/outputs/kafka/kafka.go +++ b/outputs/kafka/kafka.go @@ -14,6 +14,8 @@ type Kafka struct { Brokers []string // Kafka topic Topic string + // Routing Key Tag + RoutingTag string `toml:"routing_tag"` producer sarama.SyncProducer } @@ -23,6 +25,9 @@ var sampleConfig = ` brokers = ["localhost:9092"] # Kafka topic for producer messages topic = "telegraf" + # Telegraf tag to use as a routing key + # ie, if this tag exists, it's value will be used as the routing key + routing_tag = "host" ` func (k *Kafka) Connect() error { @@ -71,7 +76,7 @@ func (k *Kafka) Write(bp client.BatchPoints) error { Topic: k.Topic, Value: sarama.StringEncoder(value), } - if h, ok := p.Tags["host"]; ok { + if h, ok := p.Tags[k.RoutingTag]; ok { m.Key = sarama.StringEncoder(h) } From 211065565ff3ba050fa572bfcd5f9e333111481b Mon Sep 17 00:00:00 2001 From: Oliver Buschjost Date: Wed, 16 Sep 2015 22:55:55 +0200 Subject: [PATCH 079/125] Add HTTP 5xx stats to HAProxy plugin. 
Closes #194 --- plugins/haproxy/haproxy.go | 5 +++++ plugins/haproxy/haproxy_test.go | 1 + 2 files changed, 6 insertions(+) diff --git a/plugins/haproxy/haproxy.go b/plugins/haproxy/haproxy.go index df03ecced..351b4a8e7 100644 --- a/plugins/haproxy/haproxy.go +++ b/plugins/haproxy/haproxy.go @@ -245,6 +245,11 @@ func importCsvResult(r io.Reader, acc plugins.Accumulator, host string) ([][]str if err == nil { acc.Add("http_response.4xx", ival, tags) } + case HF_HRSP_5xx: + ival, err := strconv.ParseUint(v, 10, 64) + if err == nil { + acc.Add("http_response.5xx", ival, tags) + } case HF_EREQ: ival, err := strconv.ParseUint(v, 10, 64) if err == nil { diff --git a/plugins/haproxy/haproxy_test.go b/plugins/haproxy/haproxy_test.go index b54f516c9..0d63985d4 100644 --- a/plugins/haproxy/haproxy_test.go +++ b/plugins/haproxy/haproxy_test.go @@ -60,6 +60,7 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { {"http_response.2xx", 1314093}, {"http_response.3xx", 537036}, {"http_response.4xx", 123452}, + {"http_response.5xx", 11966}, {"dreq", 1102}, {"dresp", 80}, {"wretr", 17}, From 406e980faef74065361220943025e8ea2d2b07b7 Mon Sep 17 00:00:00 2001 From: Eugene Dementiev Date: Wed, 16 Sep 2015 18:31:48 +0300 Subject: [PATCH 080/125] install and init script for el5 Fixes #186 Closes #203 --- scripts/init.sh | 3 +++ scripts/package.sh | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/scripts/init.sh b/scripts/init.sh index b9339e407..d65639674 100755 --- a/scripts/init.sh +++ b/scripts/init.sh @@ -1,5 +1,8 @@ #! /usr/bin/env bash +# chkconfig: 2345 99 01 +# description: Telegraf daemon + ### BEGIN INIT INFO # Provides: telegraf # Required-Start: $all diff --git a/scripts/package.sh b/scripts/package.sh index 7f5196e33..6ca5b0dca 100755 --- a/scripts/package.sh +++ b/scripts/package.sh @@ -160,7 +160,14 @@ rm -f $INSTALL_ROOT_DIR/init.sh ln -sfn $INSTALL_ROOT_DIR/versions/$version/telegraf $INSTALL_ROOT_DIR/telegraf if ! 
id telegraf >/dev/null 2>&1; then + useradd --help 2>&1| grep -- --system > /dev/null 2>&1 + old_useradd=\$? + if [[ \$old_useradd == 0 ]] + then useradd --system -U -M telegraf + else + groupadd telegraf && useradd -M -g telegraf telegraf + fi fi # Systemd From 3be6d84675ea39cef7b6fb55a7b169cc8fc8a86e Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Sep 2015 14:23:57 -0700 Subject: [PATCH 081/125] Catching up on some CHANGELOG updates --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e70cd98e..dc3f2eb98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ will still be backwards compatible if only `url` is specified. ### Features - [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support - [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! +- [#200](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini! +- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl! ### Bugfixes - [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug - Fix net plugin on darwin - [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee! - [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced! - [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+ +- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini! 
## v0.1.8 [2015-09-04] From 66ed4f7328872b9dc86da226d1cca62418f78b5f Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Sep 2015 14:04:14 -0700 Subject: [PATCH 082/125] mysql plugin: don't emit blank tags closes #201 --- plugins/mysql/mysql.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/mysql/mysql.go b/plugins/mysql/mysql.go index d53e580c7..1825248d8 100644 --- a/plugins/mysql/mysql.go +++ b/plugins/mysql/mysql.go @@ -144,6 +144,8 @@ func (m *Mysql) gatherServer(serv string, acc plugins.Accumulator) error { var servtag string if strings.Contains(serv, "@") { servtag = strings.Split(serv, "@")[1] + } else if serv == "" { + servtag = "localhost" } else { servtag = serv } From 46cd9ff9f5741b698e93f5e22f069434310edf25 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 16 Sep 2015 15:44:29 -0700 Subject: [PATCH 083/125] Update influxdb godeps for line-protocol precision fix --- Godeps/Godeps.json | 31 +- .../github.com/influxdb/influxdb/.gitignore | 74 + .../github.com/influxdb/influxdb/CHANGELOG.md | 1676 ++ .../influxdb/influxdb/CONTRIBUTING.md | 247 + .../github.com/influxdb/influxdb/DOCKER.md | 44 + .../github.com/influxdb/influxdb/Dockerfile | 24 + .../influxdb/Dockerfile_test_ubuntu32 | 12 + .../src/github.com/influxdb/influxdb/LICENSE | 20 + .../influxdb/LICENSE_OF_DEPENDENCIES.md | 19 + .../github.com/influxdb/influxdb/QUERIES.md | 180 + .../github.com/influxdb/influxdb/README.md | 72 + .../github.com/influxdb/influxdb/balancer.go | 78 + .../influxdb/influxdb/balancer_test.go | 115 + .../influxdb/influxdb/build-docker.sh | 9 + .../influxdb/influxdb/circle-test.sh | 95 + .../github.com/influxdb/influxdb/circle.yml | 16 + .../influxdb/influxdb/client/README.md | 7 +- .../influxdb/influxdb/client/influxdb.go | 17 +- .../influxdb/influxdb/client/influxdb_test.go | 37 +- .../influxdb/influxdb/cluster/client_pool.go | 57 + .../influxdb/influxdb/cluster/config.go | 35 + .../influxdb/influxdb/cluster/config_test.go | 27 + 
.../influxdb/cluster/internal/data.pb.go | 155 + .../influxdb/cluster/internal/data.proto | 25 + .../influxdb/cluster/points_writer.go | 346 + .../influxdb/cluster/points_writer_test.go | 464 + .../influxdb/influxdb/cluster/rpc.go | 164 + .../influxdb/influxdb/cluster/rpc_test.go | 110 + .../influxdb/influxdb/cluster/service.go | 351 + .../influxdb/influxdb/cluster/service_test.go | 104 + .../influxdb/influxdb/cluster/shard_mapper.go | 196 + .../influxdb/cluster/shard_mapper_test.go | 114 + .../influxdb/influxdb/cluster/shard_writer.go | 163 + .../influxdb/cluster/shard_writer_test.go | 186 + .../influxdb/influxdb/cmd/influx/main.go | 779 + .../influxdb/influxdb/cmd/influx/main_test.go | 219 + .../cmd/influx_stress/influx_stress.go | 67 + .../influxdb/cmd/influxd/backup/backup.go | 170 + .../cmd/influxd/backup/backup_test.go | 125 + .../influxdb/cmd/influxd/help/help.go | 46 + .../influxdb/influxdb/cmd/influxd/main.go | 200 + .../influxdb/cmd/influxd/restore/restore.go | 250 + .../cmd/influxd/restore/restore_test.go | 155 + .../influxdb/cmd/influxd/run/command.go | 233 + .../influxdb/cmd/influxd/run/config.go | 225 + .../cmd/influxd/run/config_command.go | 83 + .../influxdb/cmd/influxd/run/config_test.go | 142 + .../influxdb/cmd/influxd/run/server.go | 585 + .../cmd/influxd/run/server_helpers_test.go | 356 + .../influxdb/cmd/influxd/run/server_test.go | 4374 +++++ .../influxdb/cmd/influxd/run/server_test.md | 150 + .../influxdb/influxdb/cmd/inspect/main.go | 142 + .../github.com/influxdb/influxdb/errors.go | 78 + .../influxdb/influxdb/etc/burn-in/.rvmrc | 1 + .../influxdb/influxdb/etc/burn-in/Gemfile | 4 + .../influxdb/etc/burn-in/Gemfile.lock | 14 + .../influxdb/influxdb/etc/burn-in/burn-in.rb | 79 + .../influxdb/influxdb/etc/burn-in/log.rb | 23 + .../influxdb/etc/burn-in/random_gaussian.rb | 31 + .../influxdb/etc/burn-in/random_points.rb | 29 + .../influxdb/influxdb/etc/config.sample.toml | 262 + .../influxdb/influxdb/importer/README.md | 193 + 
.../influxdb/influxdb/importer/v8/importer.go | 236 + .../influxdb/influxdb/influxql/INFLUXQL.md | 40 +- .../influxdb/influxdb/influxql/ast.go | 269 +- .../influxdb/influxdb/influxql/ast_test.go | 2 +- .../influxdb/influxql/functions_test.go | 534 - .../influxdb/influxdb/influxql/parser.go | 81 +- .../influxdb/influxdb/influxql/parser_test.go | 224 +- .../influxdb/influxdb/influxql/scanner.go | 2 + .../influxdb/influxql/scanner_test.go | 2 + .../influxdb/influxdb/influxql/token.go | 6 + .../github.com/influxdb/influxdb/influxvar.go | 45 + .../github.com/influxdb/influxdb/meta/data.go | 112 +- .../influxdb/influxdb/meta/data_test.go | 62 +- .../influxdb/influxdb/meta/errors.go | 4 + .../influxdb/meta/internal/meta.pb.go | 31 +- .../influxdb/meta/internal/meta.proto | 9 +- .../influxdb/influxdb/meta/rpc_test.go | 2 +- .../influxdb/influxdb/meta/state.go | 2 - .../influxdb/meta/statement_executor.go | 52 + .../influxdb/meta/statement_executor_test.go | 71 +- .../influxdb/influxdb/meta/store.go | 55 +- .../influxdb/influxdb/meta/store_test.go | 60 +- .../influxdb/influxdb/monitor/README.md | 47 + .../influxdb/influxdb/monitor/build_info.go | 18 + .../influxdb/influxdb/monitor/config.go | 35 + .../influxdb/influxdb/monitor/config_test.go | 30 + .../influxdb/influxdb/monitor/go_runtime.go | 19 + .../influxdb/influxdb/monitor/network.go | 21 + .../influxdb/influxdb/monitor/service.go | 406 + .../influxdb/influxdb/monitor/service_test.go | 71 + .../influxdb/monitor/statement_executor.go | 65 + .../influxdb/influxdb/monitor/system.go | 26 + .../github.com/influxdb/influxdb/nightly.sh | 14 + .../github.com/influxdb/influxdb/package.sh | 535 + .../influxdb/scripts/influxdb.service | 19 + .../influxdb/influxdb/scripts/init.sh | 218 + .../influxdb/influxdb/scripts/logrotate | 8 + .../influxdb/services/admin/config.go | 21 + .../influxdb/services/admin/config_test.go | 32 + .../influxdb/services/admin/service.go | 111 + .../influxdb/services/admin/service_test.go | 33 + 
.../services/collectd/collectd_test.conf | 209 + .../influxdb/services/collectd/config.go | 48 + .../influxdb/services/collectd/config_test.go | 32 + .../influxdb/services/collectd/service.go | 307 + .../services/collectd/service_test.go | 501 + .../services/collectd/test_client/README.md | 3 + .../services/collectd/test_client/client.go | 71 + .../services/continuous_querier/config.go | 65 + .../continuous_querier/config_test.go | 36 + .../continuous_querier/continuous_queries.md | 236 + .../services/continuous_querier/service.go | 507 + .../continuous_querier/service_test.go | 548 + .../services/copier/internal/internal.pb.go | 57 + .../services/copier/internal/internal.proto | 9 + .../influxdb/services/copier/service.go | 261 + .../influxdb/services/copier/service_test.go | 184 + .../influxdb/services/graphite/README.md | 131 + .../influxdb/services/graphite/config.go | 221 + .../influxdb/services/graphite/config_test.go | 167 + .../influxdb/services/graphite/parser.go | 349 + .../influxdb/services/graphite/parser_test.go | 548 + .../influxdb/services/graphite/service.go | 372 + .../services/graphite/service_test.go | 183 + .../influxdb/influxdb/services/hh/config.go | 44 + .../influxdb/services/hh/config_test.go | 45 + .../influxdb/influxdb/services/hh/doc.go | 5 + .../influxdb/influxdb/services/hh/limiter.go | 61 + .../influxdb/services/hh/limiter_test.go | 47 + .../influxdb/services/hh/processor.go | 218 + .../influxdb/services/hh/processor_test.go | 80 + .../influxdb/influxdb/services/hh/queue.go | 666 + .../influxdb/services/hh/queue_test.go | 327 + .../influxdb/influxdb/services/hh/service.go | 136 + .../influxdb/services/httpd/config.go | 22 + .../influxdb/services/httpd/config_test.go | 52 + .../influxdb/services/httpd/handler.go | 908 + .../influxdb/services/httpd/handler_test.go | 450 + .../services/httpd/response_logger.go | 153 + .../influxdb/services/httpd/service.go | 137 + .../influxdb/services/opentsdb/README.md | 10 + 
.../influxdb/services/opentsdb/config.go | 57 + .../influxdb/services/opentsdb/config_test.go | 38 + .../influxdb/services/opentsdb/handler.go | 181 + .../influxdb/services/opentsdb/service.go | 366 + .../services/opentsdb/service_test.go | 167 + .../influxdb/services/precreator/README.md | 13 + .../influxdb/services/precreator/config.go | 32 + .../services/precreator/config_test.go | 31 + .../influxdb/services/precreator/service.go | 100 + .../services/precreator/service_test.go | 59 + .../influxdb/services/retention/config.go | 16 + .../services/retention/config_test.go | 27 + .../influxdb/services/retention/service.go | 129 + .../influxdb/services/snapshotter/service.go | 145 + .../services/snapshotter/service_test.go | 1 + .../influxdb/influxdb/services/udp/config.go | 44 + .../influxdb/services/udp/config_test.go | 39 + .../influxdb/influxdb/services/udp/service.go | 188 + .../influxdb/influxdb/shared/admin/README.md | 15 + .../influxdb/shared/admin/css/admin.css | 87 + .../influxdb/shared/admin/css/bootstrap.css | 6584 +++++++ .../admin/css/dropdowns-enhancement.css | 294 + .../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 288 + .../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../shared/admin/img/influxdb-light400.png | Bin 0 -> 19775 bytes .../influxdb/influxdb/shared/admin/index.html | 198 + .../influxdb/shared/admin/js/admin.js | 464 + .../admin/js/vendor/bootstrap-3.3.5.min.js | 7 + .../admin/js/vendor/jquery-2.1.4.min.js | 4 + .../admin/js/vendor/react-0.13.3.min.js | 16 + .../influxdb/influxdb/statik/statik.go | 10 + .../influxdb/influxdb/stress/runner.go | 269 + .../influxdb/influxdb/stress/runner_test.go | 208 + .../github.com/influxdb/influxdb/tcp/mux.go | 154 + .../influxdb/influxdb/tcp/mux_test.go | 137 + 
.../influxdb/influxdb/test-32bit-docker.sh | 4 + .../influxdb/influxdb/tests/README.md | 4 + .../influxdb/tests/create_future_writes.sh | 22 + .../tests/create_write_multiple_query.sh | 14 + .../tests/create_write_single_query.sh | 19 + ..._with_multiple_measurements_values_tags.sh | 23 + ...e_write_single_with_multiple_tags_query.sh | 11 + .../influxdb/tests/distinct-data-scenarios.sh | 35 + .../influxdb/tests/read_write_gzip.sh | 15 + .../influxdb/influxdb/tests/siege/.gitignore | 1 + .../influxdb/influxdb/tests/siege/README.md | 66 + .../influxdb/influxdb/tests/siege/urlgen | 107 + .../influxdb/influxdb/tests/tmux/3_shards | 28 + .../influxdb/influxdb/tests/tmux/README.md | 31 + .../influxdb/influxdb/tests/tmux/sample.json | 16000 ++++++++++++++++ .../influxdb/influxdb/tests/tmux/seed.sh | 13 + .../influxdb/tests/tmux/server_8086.toml | 7 + .../influxdb/tests/tmux/server_8087.toml | 7 + .../influxdb/tests/tmux/server_8088.toml | 7 + .../influxdb/influxdb/tests/urlgen/urlgen.go | 58 + .../influxdb/influxdb/toml/toml_test.go | 9 +- .../influxdb/influxdb/tsdb/README.md | 2 +- .../influxdb/influxdb/tsdb/batcher.go | 9 +- .../influxdb/influxdb/tsdb/batcher_test.go | 31 +- .../influxdb/influxdb/tsdb/config.go | 6 +- .../influxdb/influxdb/tsdb/cursor.go | 47 +- .../influxdb/influxdb/tsdb/cursor_test.go | 146 +- .../influxdb/influxdb/tsdb/engine.go | 13 +- .../influxdb/influxdb/tsdb/engine/b1/b1.go | 103 +- .../influxdb/tsdb/engine/b1/b1_test.go | 72 +- .../influxdb/influxdb/tsdb/engine/bz1/bz1.go | 194 +- .../influxdb/tsdb/engine/bz1/bz1_test.go | 282 +- .../influxdb/influxdb/tsdb/engine/wal/wal.go | 841 +- .../influxdb/tsdb/engine/wal/wal_test.go | 671 +- .../influxdb/influxdb/tsdb/executor.go | 186 +- .../influxdb/influxdb/tsdb/executor_test.go | 120 +- .../influxdb/{influxql => tsdb}/functions.go | 727 +- .../influxdb/influxdb/tsdb/functions_test.go | 754 + .../influxdb/influxdb/tsdb/mapper.go | 651 +- .../influxdb/influxdb/tsdb/mapper_test.go | 26 +- 
.../github.com/influxdb/influxdb/tsdb/meta.go | 41 +- .../influxdb/influxdb/tsdb/monitor.go | 83 - .../influxdb/influxdb/tsdb/points.go | 302 +- .../influxdb/influxdb/tsdb/points_test.go | 207 +- .../influxdb/influxdb/tsdb/query_executor.go | 152 +- .../influxdb/tsdb/query_executor_test.go | 44 +- .../influxdb/influxdb/tsdb/shard.go | 95 +- .../influxdb/influxdb/tsdb/shard_test.go | 24 +- .../influxdb/tsdb/show_measurements.go | 237 + .../influxdb/influxdb/tsdb/store.go | 82 +- .../github.com/influxdb/influxdb/uuid/uuid.go | 93 + 232 files changed, 56889 insertions(+), 2522 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/inspect/main.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/influxvar.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/build_info.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/go_runtime.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/network.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/statement_executor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/system.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/logrotate create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/internal/internal.pb.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/internal/internal.proto create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/copier/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go create 
mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/doc.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/queue.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/queue_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/response_logger.go create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/handler.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/snapshotter/service.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/snapshotter/service_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/config.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/config_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/service.go create mode 
100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/admin.css create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/bootstrap.css create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/dropdowns-enhancement.css create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.eot create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.svg create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.ttf create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.woff create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.woff2 create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/img/influxdb-light400.png create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/index.html create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/admin.js create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/bootstrap-3.3.5.min.js create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/jquery-2.1.4.min.js create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/react-0.13.3.min.js create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/statik/statik.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/stress/runner.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/stress/runner_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tcp/mux.go create 
mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tcp/mux_test.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/test-32bit-docker.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_future_writes.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_multiple_query.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_query.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_with_multiple_measurements_values_tags.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_with_multiple_tags_query.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/distinct-data-scenarios.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/read_write_gzip.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/.gitignore create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/urlgen create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/3_shards create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/README.md create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/sample.json create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/seed.sh create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8086.toml create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8087.toml create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8088.toml create mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/tests/urlgen/urlgen.go rename Godeps/_workspace/src/github.com/influxdb/influxdb/{influxql => tsdb}/functions.go (62%) create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/functions_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/monitor.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/show_measurements.go create mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/uuid/uuid.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index f6a196913..ffd67e498 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -96,34 +96,9 @@ "Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee" }, { - "ImportPath": "github.com/influxdb/influxdb/client", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" - }, - { - "ImportPath": "github.com/influxdb/influxdb/influxql", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" - }, - { - "ImportPath": "github.com/influxdb/influxdb/meta", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" - }, - { - "ImportPath": "github.com/influxdb/influxdb/snapshot", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" - }, - { - "ImportPath": "github.com/influxdb/influxdb/toml", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" - }, - { - "ImportPath": "github.com/influxdb/influxdb/tsdb", - "Comment": "v0.9.3-rc1", - "Rev": "f4077764b2bb2b03241452d88e9db321c62bb560" + "ImportPath": "github.com/influxdb/influxdb", + "Comment": "v0.9.4-rc1-84-g6d4319d", + "Rev": "6d4319d244b47db94b79c505a16e00e7ac02ebed" }, { "ImportPath": "github.com/lib/pq", diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore b/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore new file mode 100644 index 000000000..7f3f51d4d --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore @@ -0,0 +1,74 @@ +*~ +src/ + +config.json +/bin/ + +/pkg/ + +TAGS + +# vim temp files +*.swp + +*.test +/query/a.out* +.DS_Store + +# ignore generated files. +cmd/influxd/version.go + +# executables + +influx_stress +**/influx_stress +!**/influx_stress/ + +influxd +**/influxd +!**/influxd/ + +influx +**/influx +!**/influx/ + +influxdb +**/influxdb +!**/influxdb/ + +/benchmark-tool +/main +/benchmark-storage +godef +gosym +gocode +inspect-raft + +# dependencies +out_rpm/ +packages/ + +# autoconf +autom4te.cache/ +config.log +config.status +Makefile + +# log file +influxdb.log +benchmark.log + +# config file +config.toml + +# test data files +integration/migration_data/ + +# goide project files +.idea + +# goconvey config files +*.goconvey + +// Ignore SourceGraph directory +.srclib-store/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md new file mode 100644 index 000000000..bb3a63866 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md @@ -0,0 +1,1676 @@ +## v0.9.5 [unreleased] + +### Features +- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex + +### Bugfixes +- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites + +## v0.9.4 [2015-09-14] + +### Release Notes +With this release InfluxDB is moving to Go 1.5. + +### Features +- [#4050](https://github.com/influxdb/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdb/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdb/influxdb/issues/3755): Add option to build script. 
Thanks @fg2it +- [#3863](https://github.com/influxdb/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdb/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdb/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. +- [#3901](https://github.com/influxdb/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdb/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdb/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdb/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdb/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdb/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdb/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdb/influxdb/issues/1821) +- [#4045](https://github.com/influxdb/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdb/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdb/influxdb/pull/4033): Add logrotate configuration. +- [#4043](https://github.com/influxdb/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdb/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdb/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdb/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes +- [#4042](https://github.com/influxdb/influxdb/pull/4042): Set UDP input batching defaults as needed. 
+- [#3785](https://github.com/influxdb/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdb/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdb/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdb/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdb/influxdb/pull/3856): Minor changes to retention enforcement. +- [#3884](https://github.com/influxdb/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdb/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdb/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdb/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdb/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdb/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdb/influxdb/issues/3636) +- [#3927](https://github.com/influxdb/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdb/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdb/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! 
+- [#3950](https://github.com/influxdb/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdb/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdb/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdb/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdb/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdb/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdb/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdb/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdb/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdb/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdb/influxdb/pull/4038) +- [#4053](https://github.com/influxdb/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdb/influxdb/pull/4060): Don't log EOF error in openTSDB input. +- [#3978](https://github.com/influxdb/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdb/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdb/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdb/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse + +## v0.9.3 [2015-08-26] + +### Release Notes + +There are breaking changes in this release. 
+ - To store data points as integers you must now append `i` to the number if using the line protocol. + - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. + - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details. + - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. + +Please see the *Features* section below for full details. + +### Features +- [#3376](https://github.com/influxdb/influxdb/pull/3376): Support for remote shard query mapping +- [#3372](https://github.com/influxdb/influxdb/pull/3372): Support joining nodes to existing cluster +- [#3426](https://github.com/influxdb/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 +- [#3478](https://github.com/influxdb/influxdb/pull/3478): Support incremental cluster joins +- [#3519](https://github.com/influxdb/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers +- [#3529](https://github.com/influxdb/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. 
Thanks @nathanielc +- [#3421](https://github.com/influxdb/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes +- [#3502](https://github.com/influxdb/influxdb/pull/3502): Importer for 0.8.9 data via the CLI +- [#3564](https://github.com/influxdb/influxdb/pull/3564): Fix alias, maintain column sort order +- [#3585](https://github.com/influxdb/influxdb/pull/3585): Additional test coverage for non-existent fields +- [#3246](https://github.com/influxdb/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables +- [#3599](https://github.com/influxdb/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale +- [#3636](https://github.com/influxdb/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 +- [#3641](https://github.com/influxdb/influxdb/pull/3641): Logging enhancements and single-node rename +- [#3635](https://github.com/influxdb/influxdb/pull/3635): Add build branch to version output. +- [#3115](https://github.com/influxdb/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. +- [#3628](https://github.com/influxdb/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries +- [#3721](https://github.com/influxdb/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch +- [#3514](https://github.com/influxdb/influxdb/issues/3514): Implement WAL outside BoltDB with compaction +- [#3544](https://github.com/influxdb/influxdb/pull/3544): Implement compression on top of BoltDB +- [#3795](https://github.com/influxdb/influxdb/pull/3795): Throttle import +- [#3584](https://github.com/influxdb/influxdb/pull/3584): Import/export documentation + +### Bugfixes +- [#3405](https://github.com/influxdb/influxdb/pull/3405): Prevent database panic when fields are missing. 
Thanks @jhorwit2 +- [#3411](https://github.com/influxdb/influxdb/issues/3411): 500 timeout on write +- [#3420](https://github.com/influxdb/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. +- [#3404](https://github.com/influxdb/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 +- [#3414](https://github.com/influxdb/influxdb/issues/3414): Shard mappers perform query re-writing +- [#3525](https://github.com/influxdb/influxdb/pull/3525): check if fields are valid during parse time. +- [#3511](https://github.com/influxdb/influxdb/issues/3511): Sending a large number of tag causes panic +- [#3288](https://github.com/influxdb/influxdb/issues/3288): Run go fuzz on the line-protocol input +- [#3545](https://github.com/influxdb/influxdb/issues/3545): Fix parsing string fields with newlines +- [#3579](https://github.com/influxdb/influxdb/issues/3579): Revert breaking change to `client.NewClient` function +- [#3580](https://github.com/influxdb/influxdb/issues/3580): Do not allow wildcards with fields in select statements +- [#3530](https://github.com/influxdb/influxdb/pull/3530): Aliasing a column no longer works +- [#3436](https://github.com/influxdb/influxdb/issues/3436): Fix panic in hinted handoff queue processor +- [#3401](https://github.com/influxdb/influxdb/issues/3401): Derivative on non-numeric fields panics db +- [#3583](https://github.com/influxdb/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic +- [#3611](https://github.com/influxdb/influxdb/pull/3611): Fix query arithmetic with integers +- [#3326](https://github.com/influxdb/influxdb/issues/3326): simple regex query fails with cryptic error +- [#3618](https://github.com/influxdb/influxdb/pull/3618): Fix collectd stats panic on i386. 
Thanks @richterger +- [#3625](https://github.com/influxdb/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement +- [#3629](https://github.com/influxdb/influxdb/pull/3629): Use sensible batching defaults for Graphite. +- [#3638](https://github.com/influxdb/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field +- [#3640](https://github.com/influxdb/influxdb/pull/3640): Shutdown Graphite service when signal received. +- [#3632](https://github.com/influxdb/influxdb/issues/3632): Make single-node host renames more seamless +- [#3656](https://github.com/influxdb/influxdb/issues/3656): Silence snapshotter logger for testing +- [#3651](https://github.com/influxdb/influxdb/pull/3651): Fully remove series when dropped. +- [#3517](https://github.com/influxdb/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. +- [#3522](https://github.com/influxdb/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. +- [#3646](https://github.com/influxdb/influxdb/pull/3646): Fix nil FieldCodec panic. +- [#3672](https://github.com/influxdb/influxdb/pull/3672): Reduce in-memory index by 20%-30% +- [#3673](https://github.com/influxdb/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. +- [#3676](https://github.com/influxdb/influxdb/pull/3676): Improve query performance by memoizing mapper output keys. +- [#3686](https://github.com/influxdb/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. +- [#3687](https://github.com/influxdb/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff +- [#3697](https://github.com/influxdb/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242. 
+- [#3708](https://github.com/influxdb/influxdb/issues/3708): Fix double escaping measurement name during cluster replication +- [#3704](https://github.com/influxdb/influxdb/issues/3704): cluster replication issue for measurement name containing backslash +- [#3681](https://github.com/influxdb/influxdb/issues/3681): Quoted measurement names fail +- [#3681](https://github.com/influxdb/influxdb/issues/3682): Fix inserting string value with backslashes +- [#3735](https://github.com/influxdb/influxdb/issues/3735): Append to small bz1 blocks +- [#3736](https://github.com/influxdb/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme +- [#3539](https://github.com/influxdb/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always +- [#3790](https://github.com/influxdb/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values +- [#3778](https://github.com/influxdb/influxdb/pull/3778): Don't panic if SELECT on time. +- [#3824](https://github.com/influxdb/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types +- [#3828](https://github.com/influxdb/influxdb/pull/3828): Support all number types when decoding a point +- [#3853](https://github.com/influxdb/influxdb/pull/3853): Use 4KB default block size for bz1 +- [#3607](https://github.com/influxdb/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! + +## v0.9.2 [2015-07-24] + +### Features +- [#3177](https://github.com/influxdb/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham +- [#3299](https://github.com/influxdb/influxdb/pull/3299): Refactor query engine for distributed query support. +- [#3334](https://github.com/influxdb/influxdb/pull/3334): Clean shutdown of influxd. 
Thanks @mcastilho + +### Bugfixes + +- [#3180](https://github.com/influxdb/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdb/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdb/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdb/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdb/influxdb/pull/3232): Set logging prefix for metastore. +- [#3230](https://github.com/influxdb/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdb/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdb/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdb/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdb/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdb/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdb/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdb/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdb/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 +- [#3332](https://github.com/influxdb/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. +- [#3335](https://github.com/influxdb/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report +- [#2761](https://github.com/influxdb/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. 
+- [#3356](https://github.com/influxdb/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.
+- [#3351](https://github.com/influxdb/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel
+- [#3244](https://github.com/influxdb/influxdb/pull/3244): Wire up admin privilege grant and revoke.
+- [#3259](https://github.com/influxdb/influxdb/issues/3259): Respect privileges for queries.
+- [#3256](https://github.com/influxdb/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.
+- [#3380](https://github.com/influxdb/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC.
+- [#3319](https://github.com/influxdb/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces
+- [#3453](https://github.com/influxdb/influxdb/issues/3453): Remove outdated `dump` command from CLI.
+- [#3463](https://github.com/influxdb/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.
+
+## v0.9.1 [2015-07-02]
+
+### Features
+
+- [#2650](https://github.com/influxdb/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g.
+- [#3125](https://github.com/influxdb/influxdb/pull/3125): Graphite Input Protocol Parsing
+- [#2746](https://github.com/influxdb/influxdb/pull/2746): New Admin UI/interface
+- [#3036](https://github.com/influxdb/influxdb/pull/3036): Write Ahead Log (WAL)
+- [#3014](https://github.com/influxdb/influxdb/issues/3014): Implement Raft snapshots
+
+### Bugfixes
+
+- [#3013](https://github.com/influxdb/influxdb/issues/3013): Panic error with inserting values with commas
+- [#2956](https://github.com/influxdb/influxdb/issues/2956): Type mismatch in derivative
+- [#2908](https://github.com/influxdb/influxdb/issues/2908): Field mismatch error messages need to be updated
+- [#2931](https://github.com/influxdb/influxdb/pull/2931): Services and reporting should wait until cluster has leader. 
+- [#2943](https://github.com/influxdb/influxdb/issues/2943): Ensure default retention policies are fully replicated
+- [#2948](https://github.com/influxdb/influxdb/issues/2948): Field mismatch error message to include measurement name
+- [#2919](https://github.com/influxdb/influxdb/issues/2919): Unable to insert negative floats
+- [#2935](https://github.com/influxdb/influxdb/issues/2935): Hook CPU and memory profiling back up.
+- [#2960](https://github.com/influxdb/influxdb/issues/2960): Cluster Write Errors.
+- [#2928](https://github.com/influxdb/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
+- [#2969](https://github.com/influxdb/influxdb/pull/2969): Actually set HTTP version in responses.
+- [#2993](https://github.com/influxdb/influxdb/pull/2993): Don't log each UDP batch.
+- [#2994](https://github.com/influxdb/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
+- [#3002](https://github.com/influxdb/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdb/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes.
+- [#3027](https://github.com/influxdb/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
+- [#3030](https://github.com/influxdb/influxdb/pull/3030): Fix excessive logging of shard creation.
+- [#3038](https://github.com/influxdb/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
+- [#3033](https://github.com/influxdb/influxdb/pull/3033): Add support for marshaling `uint64` in client.
+- [#3090](https://github.com/influxdb/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.
+- [#2944](https://github.com/influxdb/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. 
+- [#3075](https://github.com/influxdb/influxdb/pull/3075): GROUP BY correctly when different tags have same value.
+- [#3078](https://github.com/influxdb/influxdb/pull/3078): Fix CLI panic on malformed INSERT.
+- [#2102](https://github.com/influxdb/influxdb/issues/2102): Re-work Graphite input and metric processing
+- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing
+- [#3136](https://github.com/influxdb/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf.
+- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing
+- [#3127](https://github.com/influxdb/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
+- [#3131](https://github.com/influxdb/influxdb/pull/3131): Copy batch tags to each point before marshalling
+- [#3155](https://github.com/influxdb/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
+- [#2678](https://github.com/influxdb/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
+- [#3061](https://github.com/influxdb/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
+- [#2608](https://github.com/influxdb/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic
+- [#3183](https://github.com/influxdb/influxdb/issues/3183): using line protocol measurement names cannot contain commas
+- [#3193](https://github.com/influxdb/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
+- [#3102](https://github.com/influxdb/influxdb/issues/3102): Add authentication cache
+- [#3209](https://github.com/influxdb/influxdb/pull/3209): Dump Run() errors to stderr
+- [#3217](https://github.com/influxdb/influxdb/pull/3217): Allow WAL partition flush delay to be configurable. 
+ +## v0.9.0 [2015-06-11] + +### Bugfixes + +- [#2869](https://github.com/influxdb/influxdb/issues/2869): Adding field to existing measurement causes panic +- [#2849](https://github.com/influxdb/influxdb/issues/2849): RC32: Frequent write errors +- [#2700](https://github.com/influxdb/influxdb/issues/2700): Incorrect error message in database EncodeFields +- [#2897](https://github.com/influxdb/influxdb/pull/2897): Ensure target Graphite database exists +- [#2898](https://github.com/influxdb/influxdb/pull/2898): Ensure target openTSDB database exists +- [#2895](https://github.com/influxdb/influxdb/pull/2895): Use Graphite input defaults where necessary +- [#2900](https://github.com/influxdb/influxdb/pull/2900): Use openTSDB input defaults where necessary +- [#2886](https://github.com/influxdb/influxdb/issues/2886): Refactor backup & restore +- [#2804](https://github.com/influxdb/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! +- [#2906](https://github.com/influxdb/influxdb/pull/2906): Restrict replication factor to the cluster size +- [#2905](https://github.com/influxdb/influxdb/pull/2905): Restrict clusters to 3 peers +- [#2904](https://github.com/influxdb/influxdb/pull/2904): Re-enable server reporting. +- [#2917](https://github.com/influxdb/influxdb/pull/2917): Fix int64 field values. +- [#2920](https://github.com/influxdb/influxdb/issues/2920): Ensure collectd database exists + +## v0.9.0-rc33 [2015-06-09] + +### Bugfixes + +- [#2816](https://github.com/influxdb/influxdb/pull/2816): Enable UDP service. Thanks @renan- +- [#2824](https://github.com/influxdb/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao +- [#2823](https://github.com/influxdb/influxdb/pull/2823): Convert OpenTSDB to a service. +- [#2838](https://github.com/influxdb/influxdb/pull/2838): Set auto-created retention policy period to infinite. 
+- [#2829](https://github.com/influxdb/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
+- [#2814](https://github.com/influxdb/influxdb/issues/2814): Convert collectd to a service.
+- [#2852](https://github.com/influxdb/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
+- [#2857](https://github.com/influxdb/influxdb/issues/2857): Fix parsing commas in string field values.
+- [#2833](https://github.com/influxdb/influxdb/pull/2833): Make the default config valid.
+- [#2859](https://github.com/influxdb/influxdb/pull/2859): Fix panic on aggregate functions.
+- [#2878](https://github.com/influxdb/influxdb/pull/2878): Re-enable shard precreation.
+- [#2865](https://github.com/influxdb/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata.
+
+### Features
+- [#2858](https://github.com/influxdb/influxdb/pull/2858): Support setting openTSDB write consistency.
+
+## v0.9.0-rc32 [2015-06-07]
+
+### Release Notes
+
+This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.
+
+### Features
+- [#1997](https://github.com/influxdb/influxdb/pull/1997): Update SELECT * to return tag values.
+- [#2599](https://github.com/influxdb/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
+- [#2682](https://github.com/influxdb/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md
+- [#2683](https://github.com/influxdb/influxdb/issues/2683): Add batching support to Graphite inputs.
+- [#2687](https://github.com/influxdb/influxdb/issues/2687): Add batching support to Collectd inputs.
+- [#2696](https://github.com/influxdb/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. 
+- [#2751](https://github.com/influxdb/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now.
+- [#2684](https://github.com/influxdb/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes!
+
+### Bugfixes
+- [#2776](https://github.com/influxdb/influxdb/issues/2776): Re-implement retention policy enforcement.
+- [#2635](https://github.com/influxdb/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.
+- [#2644](https://github.com/influxdb/influxdb/issues/2644): Make SHOW queries work with FROM //.
+- [#2501](https://github.com/influxdb/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart
+- [#2647](https://github.com/influxdb/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws!
+
+## v0.9.0-rc31 [2015-05-21]
+
+### Features
+- [#1822](https://github.com/influxdb/influxdb/issues/1822): Wire up DERIVATIVE aggregate
+- [#1477](https://github.com/influxdb/influxdb/issues/1477): Wire up non_negative_derivative function
+- [#2557](https://github.com/influxdb/influxdb/issues/2557): Fix false positive error with `GROUP BY time`
+- [#1891](https://github.com/influxdb/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate
+- [#1989](https://github.com/influxdb/influxdb/issues/1989): Implement `SELECT tagName FROM m`
+
+### Bugfixes
+- [#2545](https://github.com/influxdb/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium.
+- [#2558](https://github.com/influxdb/influxdb/pull/2558): Fix client response check - thanks @vladlopes!
+- [#2566](https://github.com/influxdb/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster.
+- [#2602](https://github.com/influxdb/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. 
+- [#2610](https://github.com/influxdb/influxdb/pull/2610): Fix shard group creation
+- [#2596](https://github.com/influxdb/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points.
+- [#2592](https://github.com/influxdb/influxdb/pull/2592): Should return an error if user attempts to group by a field.
+- [#2499](https://github.com/influxdb/influxdb/pull/2499): Issuing a select query with tag as a value causes panic.
+- [#2612](https://github.com/influxdb/influxdb/pull/2612): Query planner should validate distinct is passed a field.
+- [#2531](https://github.com/influxdb/influxdb/issues/2531): Fix select with 3 or more terms in where clause.
+- [#2564](https://github.com/influxdb/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes.
+
+### PRs
+- [#2569](https://github.com/influxdb/influxdb/pull/2569): Add derivative functions
+- [#2598](https://github.com/influxdb/influxdb/pull/2598): Implement tag support in SELECT statements
+- [#2624](https://github.com/influxdb/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
+
+## v0.9.0-rc30 [2015-05-12]
+
+### Release Notes
+
+This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`.
+
+### Features
+- [#2254](https://github.com/influxdb/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate
+- [#2525](https://github.com/influxdb/influxdb/pull/2525): Serve broker diagnostics over HTTP
+- [#2186](https://github.com/influxdb/influxdb/pull/2186): The default status code for queries is now `200 OK`
+- [#2298](https://github.com/influxdb/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!
+- [#2549](https://github.com/influxdb/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. 
+- [#2568](https://github.com/influxdb/influxdb/pull/2568): Wire up SELECT DISTINCT.
+
+### Bugfixes
+- [#2535](https://github.com/influxdb/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.
+- [#2521](https://github.com/influxdb/influxdb/pull/2521): Don't truncate topic data until fully replicated.
+- [#2509](https://github.com/influxdb/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart
+- [#2532](https://github.com/influxdb/influxdb/issues/2532): Set leader ID on restart of single-node cluster.
+- [#2448](https://github.com/influxdb/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!
+- [#2108](https://github.com/influxdb/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!
+- [#2539](https://github.com/influxdb/influxdb/issues/2539): Add additional vote request logging.
+- [#2541](https://github.com/influxdb/influxdb/issues/2541): Update messaging client connection index with every message.
+- [#2542](https://github.com/influxdb/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.
+- [#2548](https://github.com/influxdb/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.
+- [#2487](https://github.com/influxdb/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart!
+- [#2552](https://github.com/influxdb/influxdb/issues/2552): Run CQ that is actually passed into go-routine.
+- [#2553](https://github.com/influxdb/influxdb/issues/2553): Fix race condition during CQ execution.
+- [#2557](https://github.com/influxdb/influxdb/issues/2557): RC30 WHERE time filter Regression.
+
+## v0.9.0-rc29 [2015-05-05]
+
+### Features
+- [#2410](https://github.com/influxdb/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.
+- [#2469](https://github.com/influxdb/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. 
+- [#1824](https://github.com/influxdb/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!
+
+### Bugfixes
+- [#2446](https://github.com/influxdb/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
+- [#2452](https://github.com/influxdb/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
+- [#2453](https://github.com/influxdb/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
+- [#2460](https://github.com/influxdb/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes #2412. Thanks @josh-padnick
+- [#2465](https://github.com/influxdb/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz
+- [#2475](https://github.com/influxdb/influxdb/pull/2475): RLock server when checking if shards groups are required during write.
+- [#2471](https://github.com/influxdb/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
+- [#2281](https://github.com/influxdb/influxdb/issues/2281): Fix Bad Escape error when parsing regex
+
+## v0.9.0-rc28 [2015-04-27]
+
+### Features
+- [#2410](https://github.com/influxdb/influxdb/pull/2410) Allow configuration of Raft timers
+- [#2354](https://github.com/influxdb/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! 
+
+### Bugfixes
+- [#2374](https://github.com/influxdb/influxdb/issues/2374): Two different panics during SELECT percentile
+- [#2404](https://github.com/influxdb/influxdb/pull/2404): Mean and percentile function fixes
+- [#2408](https://github.com/influxdb/influxdb/pull/2408): Fix snapshot 500 error
+- [#1896](https://github.com/influxdb/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop
+- [#2418](https://github.com/influxdb/influxdb/pull/2418): Fix raft node getting stuck in candidate state
+- [#2415](https://github.com/influxdb/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost
+- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.
+- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in Graphite server.
+- [#2429](https://github.com/influxdb/influxdb/pull/2429): Ensure no field value is null.
+- [#2431](https://github.com/influxdb/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils
+- [#2441](https://github.com/influxdb/influxdb/pull/2441): Correctly release server RLock during "drop series".
+- [#2445](https://github.com/influxdb/influxdb/pull/2445): Read locks and data race fixes
+
+## v0.9.0-rc27 [2015-04-23]
+
+### Features
+- [#2398](https://github.com/influxdb/influxdb/pull/2398) Track more stats and report errors for shards.
+
+### Bugfixes
+- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in openTSDB endpoint.
+- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352
+- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in graphite endpoint.
+- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled.
+- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. 
Thanks @jtakkala.
+- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369.
+- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times
+- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster.
+- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
+- [#2391](https://github.com/influxdb/influxdb/pull/2391): Unable to write points through Go client when authentication enabled
+- [#2400](https://github.com/influxdb/influxdb/pull/2400): Always send auth headers for client requests if present
+
+## v0.9.0-rc26 [2015-04-21]
+
+### Features
+- [#2301](https://github.com/influxdb/influxdb/pull/2301): Distributed query load balancing and failover
+- [#2336](https://github.com/influxdb/influxdb/pull/2336): Handle distributed queries when shards != data nodes
+- [#2353](https://github.com/influxdb/influxdb/pull/2353): Distributed Query/Clustering Fixes
+
+### Bugfixes
+- [#2297](https://github.com/influxdb/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart.
+- [#2312](https://github.com/influxdb/influxdb/pull/2312): Re-use httpclient for continuous queries
+- [#2318](https://github.com/influxdb/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. 
+- [#2242](https://github.com/influxdb/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdb/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdb/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdb/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdb/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdb/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdb/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdb/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdb/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdb/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdb/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdb/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdb/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdb/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdb/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. +- [#2367](https://github.com/influxdb/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. + +## v0.9.0-rc25 [2015-04-15] + +### Bugfixes +- [#2282](https://github.com/influxdb/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. 
+- [#2283](https://github.com/influxdb/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.
+- [#2293](https://github.com/influxdb/influxdb/pull/2293): Open cluster listener before starting broker.
+- [#2287](https://github.com/influxdb/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.
+- [#2288](https://github.com/influxdb/influxdb/pull/2288): Fix expression parsing bug.
+- [#2294](https://github.com/influxdb/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).
+
+### Features
+- [#2276](https://github.com/influxdb/influxdb/pull/2276): Broker topic truncation.
+- [#2292](https://github.com/influxdb/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!
+- [#2290](https://github.com/influxdb/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!
+- [#2295](https://github.com/influxdb/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!
+- [#2246](https://github.com/influxdb/influxdb/pull/2246): Allow HTTP logging to be controlled.
+
+## v0.9.0-rc24 [2015-04-13]
+
+### Bugfixes
+- [#2255](https://github.com/influxdb/influxdb/pull/2255): Fix panic when changing default retention policy.
+- [#2257](https://github.com/influxdb/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache.
+- [#2261](https://github.com/influxdb/influxdb/pull/2261): Support int64 value types.
+- [#2191](https://github.com/influxdb/influxdb/pull/2191): Case-insensitive check for "fill"
+- [#2274](https://github.com/influxdb/influxdb/pull/2274): Snapshot and HTTP API endpoints
+- [#2265](https://github.com/influxdb/influxdb/pull/2265): Fix auth for CLI.
+
+## v0.9.0-rc23 [2015-04-11]
+
+### Features
+- [#2202](https://github.com/influxdb/influxdb/pull/2202): Initial implementation of Distributed Queries
+- [#2202](https://github.com/influxdb/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. 
+ +### Bugfixes +- [#2225](https://github.com/influxdb/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdb/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdb/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdb/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. + +## v0.9.0-rc22 [2015-04-09] + +### Features +- [#2214](https://github.com/influxdb/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g + +### Bugfixes +- [#2223](https://github.com/influxdb/influxdb/pull/2223): Always notify term change on RequestVote + +## v0.9.0-rc21 [2015-04-09] + +### Features +- [#870](https://github.com/influxdb/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdb/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdb/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdb/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g +- [#2201](https://github.com/influxdb/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdb/influxdb/pull/2121): Parser refactor + +### Bugfixes +- [#2181](https://github.com/influxdb/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdb/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdb/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdb/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdb/influxdb/pull/2203): Fix race condition on continuous queries. 
+- [#2217](https://github.com/influxdb/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdb/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium + +## v0.9.0-rc20 [2015-04-04] + +### Features +- [#2128](https://github.com/influxdb/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdb/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdb/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdb/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes +- [#2147](https://github.com/influxdb/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdb/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdb/influxdb/pull/2151): Ignore replay commands on the metastore. +- [#2152](https://github.com/influxdb/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdb/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdb/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdb/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdb/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdb/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdb/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdb/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdb/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. 
+- [#2150](https://github.com/influxdb/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.
+
+## v0.9.0-rc19 [2015-04-01]
+
+### Features
+- [#2143](https://github.com/influxdb/influxdb/pull/2143): Add raft term logging.
+
+### Bugfixes
+- [#2145](https://github.com/influxdb/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`.
+
+## v0.9.0-rc18 [2015-03-31]
+
+### Bugfixes
+- [#2100](https://github.com/influxdb/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
+- [#2100](https://github.com/influxdb/influxdb/pull/2100): Synchronize access to shard index.
+- [#2131](https://github.com/influxdb/influxdb/pull/2131): Optimize marshalTags().
+- [#2130](https://github.com/influxdb/influxdb/pull/2130): Make fewer calls to marshalTags().
+- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
+- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support !~ tags values.
+- [#2136](https://github.com/influxdb/influxdb/pull/2136): Use map for marshaledTags cache.
+
+## v0.9.0-rc17 [2015-03-29]
+
+### Features
+- [#2076](https://github.com/influxdb/influxdb/pull/2076): Separate stdout and stderr output in init.d script
+- [#2091](https://github.com/influxdb/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdb/influxdb/pull/2081): Support writing diagnostic data into the internal database.
+- [#2095](https://github.com/influxdb/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed
+
+### Bugfixes
+- [#2093](https://github.com/influxdb/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
+- [#2084](https://github.com/influxdb/influxdb/pull/2084): Allowing leading underscores in identifiers.
+- [#2080](https://github.com/influxdb/influxdb/pull/2080): Graphite logs in seconds, not milliseconds. 
+- [#2101](https://github.com/influxdb/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
+- [#2104](https://github.com/influxdb/influxdb/pull/2104): Include NEQ when calculating field filters.
+- [#2112](https://github.com/influxdb/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
+- [#2111](https://github.com/influxdb/influxdb/pull/2111) and [#2025](https://github.com/influxdb/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
+- [#2114](https://github.com/influxdb/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.
+
+## v0.9.0-rc16 [2015-03-24]
+
+### Features
+- [#2058](https://github.com/influxdb/influxdb/pull/2058): Track number of queries executed in stats.
+- [#2059](https://github.com/influxdb/influxdb/pull/2059): Retention policies sorted by name on return to client.
+- [#2061](https://github.com/influxdb/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
+- [#2064](https://github.com/influxdb/influxdb/pull/2064): Allow init.d script to return influxd version.
+- [#2053](https://github.com/influxdb/influxdb/pull/2053): Implement backup and restore.
+- [#1631](https://github.com/influxdb/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.
+
+### Bugfixes
+- [#2037](https://github.com/influxdb/influxdb/pull/2037): Don't check 'configExists' at Run() level.
+- [#2039](https://github.com/influxdb/influxdb/pull/2039): Don't panic if getting current user fails.
+- [#2034](https://github.com/influxdb/influxdb/pull/2034): GROUP BY should require an aggregate.
+- [#2040](https://github.com/influxdb/influxdb/pull/2040): Add missing top-level help for config command.
+- [#2057](https://github.com/influxdb/influxdb/pull/2057): Move racy "in order" test to integration test suite.
+- [#2060](https://github.com/influxdb/influxdb/pull/2060): Reload server shard map on restart. 
+- [#2068](https://github.com/influxdb/influxdb/pull/2068): Fix misspelled JSON field. +- [#2067](https://github.com/influxdb/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY. + +## v0.9.0-rc15 [2015-03-19] + +### Features +- [#2000](https://github.com/influxdb/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. +- [#2007](https://github.com/influxdb/influxdb/pull/2007): Track shard-level stats. + +### Bugfixes +- [#2001](https://github.com/influxdb/influxdb/pull/2001): Ensure measurement not found returns status code 200. +- [#1985](https://github.com/influxdb/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. +- [#2003](https://github.com/influxdb/influxdb/pull/2003): Set timestamp when writing monitoring stats. +- [#2004](https://github.com/influxdb/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). +- [#2016](https://github.com/influxdb/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann +- [#2021](https://github.com/influxdb/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern + + +## v0.9.0-rc14 [2015-03-18] + +### Bugfixes +- [#1999](https://github.com/influxdb/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. + +## v0.9.0-rc13 [2015-03-17] + +### Features +- [#1974](https://github.com/influxdb/influxdb/pull/1974): Add time taken for request to the http server logs. + +### Bugfixes +- [#1971](https://github.com/influxdb/influxdb/pull/1971): Fix leader id initialization. +- [#1975](https://github.com/influxdb/influxdb/pull/1975): Require `q` parameter for query endpoint. +- [#1969](https://github.com/influxdb/influxdb/pull/1969): Print loaded config. +- [#1987](https://github.com/influxdb/influxdb/pull/1987): Fix config print startup statement for when no config is provided. 
+- [#1990](https://github.com/influxdb/influxdb/pull/1990): Drop measurement was taking too long due to transactions. + +## v0.9.0-rc12 [2015-03-15] + +### Bugfixes +- [#1942](https://github.com/influxdb/influxdb/pull/1942): Sort wildcard names. +- [#1957](https://github.com/influxdb/influxdb/pull/1957): Graphite numbers are always float64. +- [#1955](https://github.com/influxdb/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio +- [#1952](https://github.com/influxdb/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio + +### Features +- [#1935](https://github.com/influxdb/influxdb/pull/1935): Implement stateless broker for Raft. +- [#1936](https://github.com/influxdb/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring + +### Features +- [#1909](https://github.com/influxdb/influxdb/pull/1909): Implement a dump command. + +## v0.9.0-rc11 [2015-03-13] + +### Bugfixes +- [#1917](https://github.com/influxdb/influxdb/pull/1902): Creating Infinite Retention Policy Failed. +- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite Integration Test. +- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default Retention Policy incorrectly auto created. +- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for graphite if not specified. +- [#1908](https://github.com/influxdb/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdb/influxdb/pull/1937): OFFSET should be allowed to be 0. + +### Features +- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language. 
+- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. + +## v0.9.0-rc10 [2015-03-09] + +### Bugfixes +- [#1867](https://github.com/influxdb/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdb/influxdb/pull/1864): fix race in startStateLoop +- [#1753](https://github.com/influxdb/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdb/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdb/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdb/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdb/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdb/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select + +### Features +- [#1875](https://github.com/influxdb/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdb/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdb/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdb/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` + +## v0.9.0-rc9 [2015-03-06] + +### Bugfixes +- [#1872](https://github.com/influxdb/influxdb/pull/1872): Fix "stale term" errors with raft + +## v0.9.0-rc8 [2015-03-05] + +### Bugfixes +- [#1836](https://github.com/influxdb/influxdb/pull/1836): Store each parsed shell command in history file. 
+- [#1789](https://github.com/influxdb/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdb/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +### Features +- [#1755](https://github.com/influxdb/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdb/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdb/influxdb/pull/1858): Enable detailed tracing of write path + +## v0.9.0-rc7 [2015-03-02] + +### Features +- [#1813](https://github.com/influxdb/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. +- [#1826](https://github.com/influxdb/influxdb/pull/1826), [#1827](https://github.com/influxdb/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. + +### Bugfixes + +- [#1744](https://github.com/influxdb/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh +- [#1809](https://github.com/influxdb/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos + +## v0.9.0-rc6 [2015-02-27] + +### Bugfixes + +- [#1780](https://github.com/influxdb/influxdb/pull/1780): Malformed identifiers get through the parser +- [#1775](https://github.com/influxdb/influxdb/pull/1775): Panic "index out of range" on some queries +- [#1744](https://github.com/influxdb/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. + +## v0.9.0-rc5 [2015-02-27] + +### Bugfixes + +- [#1752](https://github.com/influxdb/influxdb/pull/1752): remove debug log output from collectd. +- [#1720](https://github.com/influxdb/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. 
+- [#1767](https://github.com/influxdb/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
+- [#1773](https://github.com/influxdb/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval
+- [#1771](https://github.com/influxdb/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`
+
+### Features
+
+- [#1698](https://github.com/influxdb/influxdb/pull/1698): Wire up DROP MEASUREMENT
+
+## v0.9.0-rc4 [2015-02-24]
+
+### Bugfixes
+
+- Fix authentication issue with continuous queries
+- Print version in the log on startup
+
+## v0.9.0-rc3 [2015-02-23]
+
+### Features
+
+- [#1659](https://github.com/influxdb/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
+- [#1580](https://github.com/influxdb/influxdb/pull/1580): Add support for fields with bool, int, or string data types
+- [#1687](https://github.com/influxdb/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
+- [#1629](https://github.com/influxdb/influxdb/pull/1629): Add support for `DROP SERIES` queries
+- [#1632](https://github.com/influxdb/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement
+- [#1689](https://github.com/influxdb/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
+- [#1699](https://github.com/influxdb/influxdb/pull/1699): Add CPU and memory profiling options to daemon
+- [#1672](https://github.com/influxdb/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
+- [#1591](https://github.com/influxdb/influxdb/pull/1591): Add `spread` aggregate function
+- [#1576](https://github.com/influxdb/influxdb/pull/1576): Add `first` and `last` aggregate functions
+- [#1573](https://github.com/influxdb/influxdb/pull/1573): Add `stddev` aggregate function
+- [#1565](https://github.com/influxdb/influxdb/pull/1565): Add the admin interface back into the server and update for new API
+- [#1562](https://github.com/influxdb/influxdb/pull/1562): Enforce retention policies
+- [#1700](https://github.com/influxdb/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
+- [#1706](https://github.com/influxdb/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause
+
+### Bugfixes
+
+- [#1636](https://github.com/influxdb/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
+- [#1701](https://github.com/influxdb/influxdb/pull/1701), [#1667](https://github.com/influxdb/influxdb/pull/1667), [#1663](https://github.com/influxdb/influxdb/pull/1663), [#1615](https://github.com/influxdb/influxdb/pull/1615): Raft fixes
+- [#1644](https://github.com/influxdb/influxdb/pull/1644): Add batching support for significantly improved write performance
+- [#1704](https://github.com/influxdb/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
+- [#1718](https://github.com/influxdb/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
+- [#1806](https://github.com/influxdb/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters.
+
+
+## v0.9.0-rc1,2 [no public release]
+
+### Features
+
+- Support for tags added
+- New queries for showing measurement names, tag keys, and tag values
+- Renamed shard spaces to retention policies
+- Deprecated matching against regex in favor of explicit writing and querying on retention policies
+- Pure Go InfluxQL parser
+- Switch to BoltDB as underlying datastore
+- BoltDB backed metastore to store schema information
+- Updated HTTP API to only have two endpoints `/query` and `/write`
+- Added all administrative functions to the query language
+- Change cluster architecture to have brokers and data nodes
+- Switch to streaming Raft implementation
+- In memory inverted index of the tag data
+- Pure Go implementation!
+
+## v0.8.6 [2014-11-15]
+
+### Features
+
+- [Issue #973](https://github.com/influxdb/influxdb/issues/973). Support
+  joining using a regex or list of time series
+- [Issue #1068](https://github.com/influxdb/influxdb/issues/1068). Print
+  the processor chain when the query is started
+
+### Bugfixes
+
+- [Issue #584](https://github.com/influxdb/influxdb/issues/584). Don't
+  panic if the process died while initializing
+- [Issue #663](https://github.com/influxdb/influxdb/issues/663). Make
+  sure all sub services are closed when we are stopping InfluxDB
+- [Issue #671](https://github.com/influxdb/influxdb/issues/671). Fix
+  the Makefile package target for Mac OSX
+- [Issue #800](https://github.com/influxdb/influxdb/issues/800). Use
+  su instead of sudo in the init script. This fixes the startup problem
+  on RHEL 6.
+- [Issue #925](https://github.com/influxdb/influxdb/issues/925). Don't
+  generate invalid query strings for single point queries
+- [Issue #943](https://github.com/influxdb/influxdb/issues/943). Don't
+  take two snapshots at the same time
+- [Issue #947](https://github.com/influxdb/influxdb/issues/947). Exit
+  nicely if the daemon doesn't have permission to write to the log.
+- [Issue #959](https://github.com/influxdb/influxdb/issues/959). Stop using + closed connections in the protobuf client. +- [Issue #978](https://github.com/influxdb/influxdb/issues/978). Check + for valgrind and mercurial in the configure script +- [Issue #996](https://github.com/influxdb/influxdb/issues/996). Fill should + fill the time range even if no points exists in the given time range +- [Issue #1008](https://github.com/influxdb/influxdb/issues/1008). Return + an appropriate exit status code depending on whether the process exits + due to an error or exits gracefully. +- [Issue #1024](https://github.com/influxdb/influxdb/issues/1024). Hitting + open files limit causes influxdb to create shards in loop. +- [Issue #1069](https://github.com/influxdb/influxdb/issues/1069). Fix + deprecated interface endpoint in Admin UI. +- [Issue #1076](https://github.com/influxdb/influxdb/issues/1076). Fix + the timestamps of data points written by the collectd plugin. (Thanks, + @renchap for reporting this bug) +- [Issue #1078](https://github.com/influxdb/influxdb/issues/1078). Make sure + we don't resurrect shard directories for shards that have already expired +- [Issue #1085](https://github.com/influxdb/influxdb/issues/1085). Set + the connection string of the local raft node +- [Issue #1092](https://github.com/influxdb/influxdb/issues/1093). Set + the connection string of the local node in the raft snapshot. +- [Issue #1100](https://github.com/influxdb/influxdb/issues/1100). Removing + a non-existent shard space causes the cluster to panic. +- [Issue #1113](https://github.com/influxdb/influxdb/issues/1113). A nil + engine.ProcessorChain causes a panic. + +## v0.8.5 [2014-10-27] + +### Features + +- [Issue #1055](https://github.com/influxdb/influxdb/issues/1055). Allow + graphite and collectd input plugins to have separate binding address + +### Bugfixes + +- [Issue #1058](https://github.com/influxdb/influxdb/issues/1058). 
Use + the query language instead of the continuous query endpoints that + were removed in 0.8.4 +- [Issue #1022](https://github.com/influxdb/influxdb/issues/1022). Return + an +Inf or NaN instead of panicing when we encounter a divide by zero +- [Issue #821](https://github.com/influxdb/influxdb/issues/821). Don't + scan through points when we hit the limit +- [Issue #1051](https://github.com/influxdb/influxdb/issues/1051). Fix + timestamps when the collectd is used and low resolution timestamps + is set. + +## v0.8.4 [2014-10-24] + +### Bugfixes + +- Remove the continuous query api endpoints since the query language + has all the features needed to list and delete continuous queries. +- [Issue #778](https://github.com/influxdb/influxdb/issues/778). Selecting + from a non-existent series should give a better error message indicating + that the series doesn't exist +- [Issue #988](https://github.com/influxdb/influxdb/issues/988). Check + the arguments of `top()` and `bottom()` +- [Issue #1021](https://github.com/influxdb/influxdb/issues/1021). Make + redirecting to standard output and standard error optional instead of + going to `/dev/null`. This can now be configured by setting `$STDOUT` + in `/etc/default/influxdb` +- [Issue #985](https://github.com/influxdb/influxdb/issues/985). Make + sure we drop a shard only when there's no one using it. Otherwise, the + shard can be closed when another goroutine is writing to it which will + cause random errors and possibly corruption of the database. + +### Features + +- [Issue #1047](https://github.com/influxdb/influxdb/issues/1047). Allow + merge() to take a list of series (as opposed to a regex in #72) + +## v0.8.4-rc.1 [2014-10-21] + +### Bugfixes + +- [Issue #1040](https://github.com/influxdb/influxdb/issues/1040). Revert + to older raft snapshot if the latest one is corrupted +- [Issue #1004](https://github.com/influxdb/influxdb/issues/1004). 
Querying + for data outside of existing shards returns an empty response instead of + throwing a `Couldn't lookup columns` error +- [Issue #1020](https://github.com/influxdb/influxdb/issues/1020). Change + init script exit codes to conform to the lsb standards. (Thanks, @spuder) +- [Issue #1011](https://github.com/influxdb/influxdb/issues/1011). Fix + the tarball for homebrew so that rocksdb is included and the directory + structure is clean +- [Issue #1007](https://github.com/influxdb/influxdb/issues/1007). Fix + the content type when an error occurs and the client requests + compression. +- [Issue #916](https://github.com/influxdb/influxdb/issues/916). Set + the ulimit in the init script with a way to override the limit +- [Issue #742](https://github.com/influxdb/influxdb/issues/742). Fix + rocksdb for Mac OSX +- [Issue #387](https://github.com/influxdb/influxdb/issues/387). Aggregations + with group by time(1w), time(1m) and time(1y) (for week, month and + year respectively) will cause the start time and end time of the bucket + to fall on the logical boundaries of the week, month or year. +- [Issue #334](https://github.com/influxdb/influxdb/issues/334). Derivative + for queries with group by time() and fill(), will take the difference + between the first value in the bucket and the first value of the next + bucket. +- [Issue #972](https://github.com/influxdb/influxdb/issues/972). Don't + assign duplicate server ids + +### Features + +- [Issue #722](https://github.com/influxdb/influxdb/issues/722). Add + an install target to the Makefile +- [Issue #1032](https://github.com/influxdb/influxdb/issues/1032). Include + the admin ui static assets in the binary +- [Issue #1019](https://github.com/influxdb/influxdb/issues/1019). Upgrade + to rocksdb 3.5.1 +- [Issue #992](https://github.com/influxdb/influxdb/issues/992). Add + an input plugin for collectd. (Thanks, @kimor79) +- [Issue #72](https://github.com/influxdb/influxdb/issues/72). 
Support merge + for multiple series using regex syntax + +## v0.8.3 [2014-09-24] + +### Bugfixes + +- [Issue #885](https://github.com/influxdb/influxdb/issues/885). Multiple + queries separated by semicolons work as expected. Queries are process + sequentially +- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return an + error if an invalid column is used in the where clause +- [Issue #794](https://github.com/influxdb/influxdb/issues/794). Fix case + insensitive regex matching +- [Issue #853](https://github.com/influxdb/influxdb/issues/853). Move + cluster config from raft to API. +- [Issue #714](https://github.com/influxdb/influxdb/issues/714). Don't + panic on invalid boolean operators. +- [Issue #843](https://github.com/influxdb/influxdb/issues/843). Prevent blank database names +- [Issue #780](https://github.com/influxdb/influxdb/issues/780). Fix + fill() for all aggregators +- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose + table names in double quotes in the result of GetQueryString() +- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose + table names in double quotes in the result of GetQueryString() +- [Issue #967](https://github.com/influxdb/influxdb/issues/967). Return an + error if the storage engine can't be created +- [Issue #954](https://github.com/influxdb/influxdb/issues/954). Don't automatically + create shards which was causing too many shards to be created when used with + grafana +- [Issue #939](https://github.com/influxdb/influxdb/issues/939). Aggregation should + ignore null values and invalid values, e.g. strings with mean(). +- [Issue #964](https://github.com/influxdb/influxdb/issues/964). Parse + big int in queries properly. + +## v0.8.2 [2014-09-05] + +### Bugfixes + +- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Update shard space to not set defaults + +- [Issue #867](https://github.com/influxdb/influxdb/issues/867). 
Add option to return shard space mappings in list series + +### Bugfixes + +- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return + a meaningful error if an invalid column is used in where clause + after joining multiple series + +## v0.8.2 [2014-09-08] + +### Features + +- Added API endpoint to update shard space definitions + +### Bugfixes + +- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB + +## v0.8.1 [2014-09-03] + +- [Issue #896](https://github.com/influxdb/influxdb/issues/896). Allow logging to syslog. Thanks @malthe + +### Bugfixes + +- [Issue #868](https://github.com/influxdb/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdb/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdb/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdb/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) + +## v0.8.0 [2014-08-22] + +### Features + +- [Issue #850](https://github.com/influxdb/influxdb/issues/850). Makes the server listing more informative + +### Bugfixes + +- [Issue #779](https://github.com/influxdb/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdb/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdb/influxdb/issues/862). Data migrator should have option to set delay time. + +## v0.8.0-rc.5 [2014-08-15] + +### Features + +- [Issue #376](https://github.com/influxdb/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdb/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdb/influxdb/issues/746). 
Add data migration tool for 0.8.0 + +### Bugfixes + +- [Issue #426](https://github.com/influxdb/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Don't emit non existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdb/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdb/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Don't emit non existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdb/influxdb/issues/791). Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdb/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdb/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdb/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdb/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdb/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdb/influxdb/issues/831). Move create shard space to be db specific + +## v0.8.0-rc.4 [2014-07-29] + +### Bugfixes + +- [Issue #774](https://github.com/influxdb/influxdb/issues/774). Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdb/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) +- [Issue #736](https://github.com/influxdb/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdb/influxdb/issues/713). 
Null should be a valid fill value +- [Issue #644](https://github.com/influxdb/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Panic when distinct fields are added after an inner join + +## v0.8.0-rc.3 [2014-07-21] + +### Bugfixes + +- [Issue #752](https://github.com/influxdb/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdb/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdb/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdb/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdb/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. + + +## v0.8.0-rc.2 [2014-07-15] + +- This release is to fix a build error in rc1 which caused rocksdb to not be available +- Bump up the `max-open-files` option to 1000 on all storage engines +- Lower the `write-buffer-size` to 1000 + +## v0.8.0-rc.1 [2014-07-15] + +### Features + +- [Issue #643](https://github.com/influxdb/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdb/influxdb/issues/641). Support multiple storage engines +- [Issue #665](https://github.com/influxdb/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdb/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdb/influxdb/issues/648). 
Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdb/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdb/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft +- [Issue #255](https://github.com/influxdb/influxdb/issues/255). Support millisecond precision using `ms` suffix +- [Issue #95](https://github.com/influxdb/influxdb/issues/95). Drop database should not be synchronous +- [Issue #571](https://github.com/influxdb/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies +- Default storage engine changed to RocksDB + +### Bugfixes + +- [Issue #651](https://github.com/influxdb/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe) +- [Issue #670](https://github.com/influxdb/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs +- [Issue #676](https://github.com/influxdb/influxdb/issues/676). Allow storing high precision integer values without losing any information +- [Issue #695](https://github.com/influxdb/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150) +- [Issue #731](https://github.com/influxdb/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false +- [Issue #733](https://github.com/influxdb/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled +- [Issue #707](https://github.com/influxdb/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character +- [Issue #734](https://github.com/influxdb/influxdb/issues/734). Don't buffer non replicated writes +- [Issue #465](https://github.com/influxdb/influxdb/issues/465). 
Recreating a currently deleting db or series doesn't bring back the old data anymore +- [Issue #358](https://github.com/influxdb/influxdb/issues/358). **BREAKING** List series should return as a single series +- [Issue #499](https://github.com/influxdb/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error +- [Issue #570](https://github.com/influxdb/influxdb/issues/570). InfluxDB crashes during delete/drop of database +- [Issue #592](https://github.com/influxdb/influxdb/issues/592). Drop series is inefficient + +## v0.7.3 [2014-06-13] + +### Bugfixes + +- [Issue #637](https://github.com/influxdb/influxdb/issues/637). Truncate log files if the last request wasn't written properly +- [Issue #646](https://github.com/influxdb/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted. + +## v0.7.2 [2014-05-30] + +### Features + +- [Issue #521](https://github.com/influxdb/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek) + +### Bugfixes + +- [Issue #418](https://github.com/influxdb/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things. +- [Issue #606](https://github.com/influxdb/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist +- [Issue #602](https://github.com/influxdb/influxdb/issues/602). Merge will fail to work across shards + +### Features + +## v0.7.1 [2014-05-29] + +### Bugfixes + +- [Issue #579](https://github.com/influxdb/influxdb/issues/579). Reject writes to nonexistent databases +- [Issue #597](https://github.com/influxdb/influxdb/issues/597). Force compaction after deleting data + +### Features + +- [Issue #476](https://github.com/influxdb/influxdb/issues/476). Support ARM architecture +- [Issue #578](https://github.com/influxdb/influxdb/issues/578). Support aliasing for expressions in parenthesis +- [Issue #544](https://github.com/influxdb/influxdb/pull/544). 
Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdb/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdb/influxdb/pull/600). Report version, os, arch, and raftName once per day. + +## v0.7.0 [2014-05-23] + +### Bugfixes + +- [Issue #557](https://github.com/influxdb/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdb/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdb/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdb/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdb/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdb/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdb/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdb/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdb/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdb/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdb/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdb/influxdb/issues/502). Fix a + race condition in assigning id to db+series+field (Thanks @ohurvitz + for reporting this bug and providing a script to repro) + +### Features + +- [Issue #567](https://github.com/influxdb/influxdb/issues/567). 
Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) + +### Deprecated + +- [Issue #460](https://github.com/influxdb/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdb/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdb/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins + +## v0.6.5 [2014-05-19] + +### Features + +- [Issue #551](https://github.com/influxdb/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) + +### Bugfixes + +- [Issue #555](https://github.com/influxdb/influxdb/issues/555). Fix a regression introduced in the raft snapshot format + +## v0.6.4 [2014-05-16] + +### Features + +- Make the write batch size configurable (also applies to deletes) +- Optimize writing to multiple series +- [Issue #546](https://github.com/influxdb/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) + +### Bugfixes + +- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards +- [Issue #489](https://github.com/influxdb/influxdb/issues/489). Remove replication factor from CreateDatabase command + +## v0.6.3 [2014-05-13] + +### Features + +- [Issue #505](https://github.com/influxdb/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01) +- [Issue #520](https://github.com/influxdb/influxdb/issues/520). Print the version to the log file + +### Bugfixes + +- [Issue #516](https://github.com/influxdb/influxdb/issues/516). Close WAL log/index files when they aren't being used +- [Issue #532](https://github.com/influxdb/influxdb/issues/532). Don't log graphite connection EOF as an error +- [Issue #535](https://github.com/influxdb/influxdb/issues/535). WAL Replay hangs if response isn't received +- [Issue #538](https://github.com/influxdb/influxdb/issues/538). 
Don't panic if the same series existed twice in the request with different columns +- [Issue #536](https://github.com/influxdb/influxdb/issues/536). Joining the cluster after shards are creating shouldn't cause new nodes to panic +- [Issue #539](https://github.com/influxdb/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups +- [Issue #534](https://github.com/influxdb/influxdb/issues/534). Create a new series when interpolating + +## v0.6.2 [2014-05-09] + +### Bugfixes + +- [Issue #511](https://github.com/influxdb/influxdb/issues/511). Don't automatically create the database when a db user is created +- [Issue #512](https://github.com/influxdb/influxdb/issues/512). Group by should respect null values +- [Issue #518](https://github.com/influxdb/influxdb/issues/518). Filter Infinities and NaNs from the returned json +- [Issue #522](https://github.com/influxdb/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files +- [Issue #369](https://github.com/influxdb/influxdb/issues/369). Fix some edge cases with WAL recovery + +## v0.6.1 [2014-05-06] + +### Bugfixes + +- [Issue #500](https://github.com/influxdb/influxdb/issues/500). Support `y` suffix in time durations +- [Issue #501](https://github.com/influxdb/influxdb/issues/501). Writes with invalid payload should be rejected +- [Issue #507](https://github.com/influxdb/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster +- [Issue #508](https://github.com/influxdb/influxdb/issues/508). Don't replay WAL entries for servers with no shards +- [Issue #464](https://github.com/influxdb/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns +- [Issue #480](https://github.com/influxdb/influxdb/issues/480). Large values on the y-axis get cut off + +## v0.6.0 [2014-05-02] + +### Feature + +- [Issue #477](https://github.com/influxdb/influxdb/issues/477). 
Add a udp json interface (Thanks, Julien Ammous) +- [Issue #491](https://github.com/influxdb/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller) + +### Bugfixes + +- [Issue #469](https://github.com/influxdb/influxdb/issues/469). Drop continuous queries when a database is dropped +- [Issue #431](https://github.com/influxdb/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file +- [Issue #483](https://github.com/influxdb/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller) +- [Issue #486](https://github.com/influxdb/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series +- [Issue #490](https://github.com/influxdb/influxdb/issues/490). Database user password's cannot be changed (Thanks, Edward Muller) +- [Issue #495](https://github.com/influxdb/influxdb/issues/495). Enforce write permissions properly + +## v0.5.12 [2014-04-29] + +### Bugfixes + +- [Issue #419](https://github.com/influxdb/influxdb/issues/419),[Issue #478](https://github.com/influxdb/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user + +## v0.5.11 [2014-04-25] + +### Features + +- [Issue #471](https://github.com/influxdb/influxdb/issues/471). Read and write permissions should be settable through the http api + +### Bugfixes + +- [Issue #323](https://github.com/influxdb/influxdb/issues/323). Continuous queries should guard against data loops +- [Issue #473](https://github.com/influxdb/influxdb/issues/473). Engine memory optimization + +## v0.5.10 [2014-04-22] + +### Features + +- [Issue #463](https://github.com/influxdb/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes) +- [Issue #447](https://github.com/influxdb/influxdb/issues/447). 
Allow @ in usernames +- [Issue #466](https://github.com/influxdb/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes) + +### Bugfixes + +- [Issue #458](https://github.com/influxdb/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1 +- [Issue #457](https://github.com/influxdb/influxdb/issues/457). Deleting series that start with capital letters should work + +## v0.5.9 [2014-04-18] + +### Bugfixes + +- [Issue #446](https://github.com/influxdb/influxdb/issues/446). Check for (de)serialization errors +- [Issue #456](https://github.com/influxdb/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value +- [Issue #455](https://github.com/influxdb/influxdb/issues/455). Comparison operators should ignore null values + +## v0.5.8 [2014-04-17] + +- Renamed config.toml.sample to config.sample.toml + +### Bugfixes + +- [Issue #244](https://github.com/influxdb/influxdb/issues/244). Reconstruct the query from the ast +- [Issue #449](https://github.com/influxdb/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up +- [Issue #451](https://github.com/influxdb/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that + aggregation queries over large periods of time don't take insane amounts of memory + +## v0.5.7 [2014-04-15] + +### Features + +- Queries are now logged as INFO in the log file before they run + +### Bugfixes + +- [Issue #328](https://github.com/influxdb/influxdb/issues/328). Join queries with math expressions don't work +- [Issue #440](https://github.com/influxdb/influxdb/issues/440). Heartbeat timeouts in logs +- [Issue #442](https://github.com/influxdb/influxdb/issues/442). shouldQuerySequentially didn't work as expected + causing count(*) queries on large time series to use + lots of memory +- [Issue #437](https://github.com/influxdb/influxdb/issues/437). 
Queries with negative constants don't parse properly +- [Issue #432](https://github.com/influxdb/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart +- [Issue #439](https://github.com/influxdb/influxdb/issues/439). Report the right location of the error in the query +- Fix some bugs with the WAL recovery on startup + +## v0.5.6 [2014-04-08] + +### Features + +- [Issue #310](https://github.com/influxdb/influxdb/issues/310). Request should support multiple timeseries +- [Issue #416](https://github.com/influxdb/influxdb/issues/416). Improve the time it takes to drop database + +### Bugfixes + +- [Issue #413](https://github.com/influxdb/influxdb/issues/413). Don't assume that group by interval is greater than a second +- [Issue #415](https://github.com/influxdb/influxdb/issues/415). Include the database when sending an auth error back to the user +- [Issue #421](https://github.com/influxdb/influxdb/issues/421). Make read timeout a config option +- [Issue #392](https://github.com/influxdb/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards + +### Bugfixes + +## v0.5.5 [2014-04-04] + +- Upgrade leveldb 1.10 -> 1.15 + + This should be a backward compatible change, but is here for documentation only + +### Feature + +- Add a command line option to repair corrupted leveldb databases on startup +- [Issue #401](https://github.com/influxdb/influxdb/issues/401). No limit on the number of columns in the group by clause + +### Bugfixes + +- [Issue #398](https://github.com/influxdb/influxdb/issues/398). Support now() and NOW() in the query lang +- [Issue #403](https://github.com/influxdb/influxdb/issues/403). Filtering should work with join queries +- [Issue #404](https://github.com/influxdb/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server +- [Issue #405](https://github.com/influxdb/influxdb/issues/405). 
Percentile shouldn't crash for small number of values +- [Issue #408](https://github.com/influxdb/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics +- [Issue #390](https://github.com/influxdb/influxdb/issues/390). Multiple response.WriteHeader when querying as admin +- [Issue #407](https://github.com/influxdb/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized +- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131 + +## v0.5.4 [2014-04-02] + +### Bugfixes + +- [Issue #386](https://github.com/influxdb/influxdb/issues/386). Drop series should work with series containing dots +- [Issue #389](https://github.com/influxdb/influxdb/issues/389). Filtering shouldn't stop prematurely +- [Issue #341](https://github.com/influxdb/influxdb/issues/341). Make the number of shards that are queried in parallel configurable +- [Issue #394](https://github.com/influxdb/influxdb/issues/394). Support count(distinct) and count(DISTINCT) +- [Issue #362](https://github.com/influxdb/influxdb/issues/362). Limit should be enforced after aggregation + +## v0.5.3 [2014-03-31] + +### Bugfixes + +- [Issue #378](https://github.com/influxdb/influxdb/issues/378). Indexing should return if there are no requests added since the last index +- [Issue #370](https://github.com/influxdb/influxdb/issues/370). Filtering and limit should be enforced on the shards +- [Issue #379](https://github.com/influxdb/influxdb/issues/379). Boolean columns should be usable in where clauses +- [Issue #381](https://github.com/influxdb/influxdb/issues/381). Should be able to do deletes as a cluster admin + +## v0.5.2 [2014-03-28] + +### Bugfixes + +- [Issue #342](https://github.com/influxdb/influxdb/issues/342). Data resurrected after a server restart +- [Issue #367](https://github.com/influxdb/influxdb/issues/367). 
Influxdb won't start if the api port is commented out +- [Issue #355](https://github.com/influxdb/influxdb/issues/355). Return an error on wrong time strings +- [Issue #331](https://github.com/influxdb/influxdb/issues/331). Allow negative time values in the where clause +- [Issue #371](https://github.com/influxdb/influxdb/issues/371). Series index isn't deleted when the series is dropped +- [Issue #360](https://github.com/influxdb/influxdb/issues/360). Store and recover continuous queries + +## v0.5.1 [2014-03-24] + +### Bugfixes + +- Revert the version of goraft due to a bug found in the latest version + +## v0.5.0 [2014-03-24] + +### Features + +- [Issue #293](https://github.com/influxdb/influxdb/pull/293). Implement a Graphite listener + +### Bugfixes + +- [Issue #340](https://github.com/influxdb/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order + +## v0.5.0-rc.6 [2014-03-20] + +### Bugfixes + +- Increase raft election timeout to avoid unnecessary re-elections +- Sort points before writing them to avoid an explosion in the request + number when the points are written randomly +- [Issue #335](https://github.com/influxdb/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries +- [Issue #318](https://github.com/influxdb/influxdb/pull/318). Support EXPLAIN queries +- [Issue #333](https://github.com/influxdb/influxdb/pull/333). Fail + when the password is too short or too long instead of passing it to + the crypto library + +## v0.5.0-rc.5 [2014-03-11] + +### Bugfixes + +- [Issue #312](https://github.com/influxdb/influxdb/issues/312). WAL should wait for server id to be set before recovering +- [Issue #301](https://github.com/influxdb/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache +- [Issue #319](https://github.com/influxdb/influxdb/issues/319). 
Propagate engine creation error correctly to the user +- [Issue #316](https://github.com/influxdb/influxdb/issues/316). Make + sure we don't starve goroutines if we get an access denied error + from one of the shards +- [Issue #306](https://github.com/influxdb/influxdb/issues/306). Deleting/Dropping database takes a lot of memory +- [Issue #302](https://github.com/influxdb/influxdb/issues/302). Should be able to set negative timestamps on points +- [Issue #327](https://github.com/influxdb/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314 +- [Issue #321](https://github.com/influxdb/influxdb/issues/321). Make sure we split points on shards properly + +## v0.5.0-rc.4 [2014-03-07] + +### Bugfixes + +- [Issue #298](https://github.com/influxdb/influxdb/issues/298). Fix limit when querying multiple shards +- [Issue #305](https://github.com/influxdb/influxdb/issues/305). Shard ids not unique after restart +- [Issue #309](https://github.com/influxdb/influxdb/issues/309). Don't relog the requests on the remote server +- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc) + +## v0.5.0-rc.3 [2014-03-03] + +### Bugfixes +- [Issue #69](https://github.com/influxdb/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdb/influxdb/issues/287). Make the lru cache size configurable +- [Issue #38](https://github.com/influxdb/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdb/influxdb/issues/286). Make the number of open shards configurable +- Make LevelDB use the max open files configuration option. + +## v0.5.0-rc.2 [2014-02-27] + +### Bugfixes + +- [Issue #274](https://github.com/influxdb/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdb/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdb/influxdb/issues/279). 
Limits not working on regex queries +- [Issue #281](https://github.com/influxdb/influxdb/issues/281). `./influxdb -v` should print the sha when building from source +- [Issue #283](https://github.com/influxdb/influxdb/issues/283). Dropping shard and restart in cluster causes panic. +- [Issue #288](https://github.com/influxdb/influxdb/issues/288). Sequence numbers should be unique per server id + +## v0.5.0-rc.1 [2014-02-25] + +### Bugfixes + +- Ensure large deletes don't take too much memory +- [Issue #240](https://github.com/influxdb/influxdb/pull/240). Unable to query against columns with `.` in the name. +- [Issue #250](https://github.com/influxdb/influxdb/pull/250). different result between normal and continuous query with "group by" clause +- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points + +### Features + +- [Issue #243](https://github.com/influxdb/influxdb/issues/243). Should have endpoint to GET a user's attributes. +- [Issue #269](https://github.com/influxdb/influxdb/pull/269), [Issue #65](https://github.com/influxdb/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdb/influxdb/pull/269),[Issue #103](https://github.com/influxdb/influxdb/pull/269),[Issue #166](https://github.com/influxdb/influxdb/pull/269),[Issue #165](https://github.com/influxdb/influxdb/pull/269),[Issue #132](https://github.com/influxdb/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup + +### Deprecated + +- [Issue #189](https://github.com/influxdb/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. +- [Issue #216](https://github.com/influxdb/influxdb/pull/216). 
Results with no points should exclude columns and points + +## v0.4.4 [2014-02-05] + +### Features + +- Make the leveldb max open files configurable in the toml file + +## v0.4.3 [2014-01-31] + +### Bugfixes + +- [Issue #225](https://github.com/influxdb/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdb/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdb/influxdb/issues/224). Null values broke replication due to protobuf limitation + +## v0.4.1 [2014-01-30] + +### Features + +- [Issue #193](https://github.com/influxdb/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdb/influxdb/pull/190). Add support for SSL. +- [Issue #194](https://github.com/influxdb/influxdb/pull/194). Should be able to disable Admin interface. + +### Bugfixes + +- [Issue #33](https://github.com/influxdb/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdb/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. +- [Issue #199](https://github.com/influxdb/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdb/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail +- [Issue #215](https://github.com/influxdb/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. + +## v0.4.0 [2014-01-17] + +## Features + +- [Issue #86](https://github.com/influxdb/influxdb/issues/86). Support arithmetic expressions in select clause +- [Issue #92](https://github.com/influxdb/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdb/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdb/influxdb/issues/64). 
Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdb/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdb/influxdb/issues/102). Support expressions in where condition +- [Issue #101](https://github.com/influxdb/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdb/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdb/influxdb/issues/96). Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdb/influxdb/issues/94). delete queries +- [Issue #116](https://github.com/influxdb/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdb/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdb/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdb/influxdb/issues/149). Cluster admins should be able to perform reads and writes. +- [Issue #108](https://github.com/influxdb/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdb/influxdb/issues/114). Servers should periodically check that they're consistent. +- [Issue #93](https://github.com/influxdb/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdb/influxdb/issues/177). Support drop series in the query language. +- [Issue #184](https://github.com/influxdb/influxdb/issues/184). Implement Raft log compaction. +- [Issue #153](https://github.com/influxdb/influxdb/issues/153). Implement continuous queries + +### Bugfixes + +- [Issue #90](https://github.com/influxdb/influxdb/issues/90). Group by multiple columns panic +- [Issue #89](https://github.com/influxdb/influxdb/issues/89). 
'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdb/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative +- [Issue #105](https://github.com/influxdb/influxdb/issues/105). Panic when using a where clause that reference columns with null values +- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdb/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdb/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdb/influxdb/issues/127). Return error on delete queries with where condition that don't have time +- [Issue #117](https://github.com/influxdb/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdb/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. +- [Issue #158](https://github.com/influxdb/influxdb/issues/158). Logged deletes should be stored with the time range if missing. +- [Issue #136](https://github.com/influxdb/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdb/influxdb/issues/145). Server fails to join cluster if all starting at same time. +- [Issue #176](https://github.com/influxdb/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdb/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. +- [Issue #182](https://github.com/influxdb/influxdb/issues/182). 
Queries with invalid limit clause crash the server + +### Deprecated + +- deprecate '==' and '!=' in favor of '=' and '<>', respectively +- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` +- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. +- Querying for column names that don't exist no longer throws an error. + +## v0.3.2 + +## Features + +- [Issue #82](https://github.com/influxdb/influxdb/issues/82). Add endpoint for listing available admin interfaces. +- [Issue #80](https://github.com/influxdb/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdb/influxdb/issues/81). Add support for IN + +## Bugfixes + +- [Issue #75](https://github.com/influxdb/influxdb/issues/75). Don't allow time series names that start with underscore +- [Issue #85](https://github.com/influxdb/influxdb/issues/85). Non-existing columns exist after they have been queried before + +## v0.3.0 + +## Features + +- [Issue #51](https://github.com/influxdb/influxdb/issues/51). Implement first and last aggregates +- [Issue #35](https://github.com/influxdb/influxdb/issues/35). Support table aliases in Join Queries +- [Issue #71](https://github.com/influxdb/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdb/influxdb/issues/59). Add histogram aggregate function + +## Bugfixes + +- Fix join and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdb/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdb/influxdb/issues/63). 
Aggregate queries should not have a sequence_number column + +## v0.2.0 + +### Features + +- [Issue #37](https://github.com/influxdb/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdb/influxdb/issues/47). Spill out query and database detail at the time of bug report + +### Bugfixes + +- [Issue #36](https://github.com/influxdb/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdb/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdb/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdb/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdb/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdb/influxdb/issues/44). Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdb/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdb/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdb/influxdb/issues/53). Writing null values via HTTP API fails + +### Deprecated + +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` +- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. + +## v0.1.0 + +### Features + +- [Issue #29](https://github.com/influxdb/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdb/influxdb/issues/31). Support Basic Auth as well as query params for authentication. 
+ +### Bugfixes + +- Don't allow creating users with empty username +- [Issue #22](https://github.com/influxdb/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdb/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdb/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdb/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdb/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdb/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli + +## v0.0.9 + +#### Features + +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp. + +#### Bugfixes + +- Set PYTHONPATH and CC appropriately on mac os x. +- [Issue #18](https://github.com/influxdb/influxdb/issues/18). Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdb/influxdb/issues/23). Fix the init scripts on redhat + +## v0.0.8 + +#### Features + +- Add a way to reset the root password from the command line. +- Add distinct(..) and derivative(...) support +- Print test coverage if running go1.2 + +#### Bugfixes + +- Fix the default admin site path in the .deb and .rpm packages. +- Fix the configuration filename in the .tar.gz package. + +## v0.0.7 + +#### Features + +- include the admin site in the repo to make it easier for newcomers. + +## v0.0.6 + +#### Features + +- Add count(distinct(..)) support + +#### Bugfixes + +- Reuse levigo read/write options. 
+ +## v0.0.5 + +#### Features + +- Cache passwords in memory to speed up password verification +- Add MERGE and INNER JOIN support + +#### Bugfixes + +- All columns should be returned if `select *` was used +- Read/Write benchmarks + +## v0.0.2 + +#### Features + +- Add an admin UI +- Deb and RPM packages + +#### Bugfixes + +- Fix some nil pointer dereferences +- Cleanup the aggregators implementation + +## v0.0.1 [2013-10-22] + + * Initial Release diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md new file mode 100644 index 000000000..5b619ac64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md @@ -0,0 +1,247 @@ +Contributing to InfluxDB +======================== + +Bug reports +--------------- +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. +* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04. +* The version of InfluxDB you are running +* Whether you installed it using a pre-built package, or built it from source. +* A small test case, if applicable, that demonstrates the issues. + +Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** +If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) + +Test cases should be in the form of `curl` commands. 
For example: +``` +# create database +curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" + +# create retention policy +curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" + +# write data +curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" + +# Delete a Measurement +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' + +# Query the Measurement +# Bug: expected it to return no data, but data comes back. +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' +``` +**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. + +Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed. + +Feature requests +--------------- +We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. + +Contributing to the source code +--------------- + +InfluxDB follows standard Go project structure. This means that all +your go development are done in `$GOPATH/src`. GOPATH can be any +directory under which InfluxDB and all its dependencies will be +cloned. 
For more details on recommended go project's structure, see +[How to Write Go Code](http://golang.org/doc/code.html) and +[Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow +the steps below. + +Submitting a pull request +------------ +To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. + +There will usually be some back and forth as we finalize the change, but once that completes it may be merged. + +To assist in review for the PR, please add the following to your pull request comment: + +```md +- [ ] CHANGELOG.md updated +- [ ] Rebased/mergable +- [ ] Tests pass +- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed) +``` + +Use of third-party packages +------------ +A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. + +For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). 
+ +Signing the CLA +--------------- + +If you are going to be contributing back to InfluxDB please take a +second to sign our CLA, which can be found +[on our website](http://influxdb.com/community/cla.html). + +Installing Go +------------- +InfluxDB requires Go 1.5 or greater. + +At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions +on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). + +After installing gvm you can install and set the default go version by +running the following: + + gvm install go1.5 + gvm use go1.5 --default + +Revision Control Systems +------------- +Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system. +Currently the project only depends on `git` and `mercurial`. + +* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) +* [Install Mercurial](http://mercurial.selenic.com/wiki/Download) + +Getting the source +------ +Setup the project structure and fetch the repo like so: + + mkdir $HOME/gocodez + export GOPATH=$HOME/gocodez + go get github.com/influxdb/influxdb + +You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime. + +Cloning a fork +------------- +If you wish to work with fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, instead clone your fork. Follow the steps below to work with a fork: + + export GOPATH=$HOME/gocodez + mkdir -p $GOPATH/src/github.com/influxdb + cd $GOPATH/src/github.com/influxdb + git clone git@github.com:/influxdb + +Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly. 
+ +Pre-commit checks +------------- + +We have a pre commit hook to make sure code is formatted properly +and vetted before you commit any changes. We strongly recommend using the pre +commit hook to guard against accidentally committing unformatted +code. To use the pre-commit hook, run the following: + + cd $GOPATH/src/github.com/influxdb/influxdb + cp .hooks/pre-commit .git/hooks/ + +In case the commit is rejected because it's not formatted you can run +the following to format the code: + +``` +go fmt ./... +go vet ./... +``` + +To install go vet, run the following command: +``` +go get golang.org/x/tools/cmd/vet +``` + +NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. + +For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). + +Build and Test +----- + +Make sure you have Go installed and the project structure as shown above. To then build the project, execute the following commands: + +```bash +cd $GOPATH/src/github.com/influxdb +go get -u -f -t ./... +go build ./... +``` + +To then install the binaries, run the following command. They can be found in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`. + +```bash +go install ./... +``` + +To set the version and commit flags during the build pass the following to the build command: + +```bash +-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT" +``` + +where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash. + +If you want to build packages, see `package.sh` help: +```bash +package.sh -h +``` + +To run the tests, execute the following command: + +```bash +cd $GOPATH/src/github.com/influxdb/influxdb +go test -v ./... + +# run tests that match some pattern +go test -run=TestDatabase . -v + +# run tests and show coverage +go test -coverprofile /tmp/cover . 
&& go tool cover -html /tmp/cover +``` + +To install go cover, run the following command: +``` +go get golang.org/x/tools/cmd/cover +``` + +Generated Google Protobuf code +----------------- +Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. + +First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/ +) 2.6.1 or later for your OS: + +Then install the go plugins: + +```bash +go get github.com/gogo/protobuf/proto +go get github.com/gogo/protobuf/protoc-gen-gogo +go get github.com/gogo/protobuf/gogoproto +``` + +Finally, run `go generate` after updating any `*.proto` file: + +```bash +go generate ./... +``` +**Troubleshooting** + +If generating the protobuf code is failing for you, check each of the following: + * Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. + * Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. + +Profiling +----- +When troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start InfluxDB with CPU or memory profiling turned on. For example: + +```sh +# start influx with profiling +./influxd -cpuprofile influxd.prof +# run queries, writes, whatever you're testing +# open up pprof +go tool pprof influxd influxd.prof +# once inside run "web", opens up browser with the CPU graph +# can also run "web " to zoom in. Or "list " to see specific lines +``` + +Continuous Integration testing +----- +InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. 
You can see the exact version of Go InfluxDB uses for testing by consulting that file. + +Useful links +------------ +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) +- [Go in production](http://peter.bourgon.org/go-in-production/) +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md new file mode 100644 index 000000000..e78187d9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md @@ -0,0 +1,44 @@ +# Docker Setup +======================== + +This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment. + +## Building Image + +To build a docker image for InfluxDB from your current checkout, run the following: + +``` +$ ./build-docker.sh +``` + +This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. + +To build the image using a different version of go: + +``` +$ GO_VER=1.4.2 ./build-docker.sh +``` + +Available version can be found [here](https://hub.docker.com/_/golang/). + +## Single Node Container + +This will start an interactive, single-node, that publishes the containers port `8086` and `8088` to the hosts ports `8086` and `8088` respectively. This is identical to starting `influxd` manually. + +``` +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb +``` + +## Multi-Node Cluster + +This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. 
This is only useful for test clusters. + +The `HOST_IP` env variable should be your host IP if running under linux or the virtualbox VM IP if running under OSX. On OSX, this would be something like: `$(docker-machine ip dev)` or `$(boot2docker ip)` depending on which docker tool you are using. + +``` +$ export HOST_IP= +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088 +$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088 +$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088 +``` + diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile new file mode 100644 index 000000000..d30cd300d --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile @@ -0,0 +1,24 @@ +FROM busybox:ubuntu-14.04 + +MAINTAINER Jason Wilder "" + +# admin, http, udp, cluster, graphite, opentsdb, collectd +EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 + +WORKDIR /app + +# copy binary into image +COPY influxd /app/ + +# Add influxd to the PATH +ENV PATH=/app:$PATH + +# Generate a default config +RUN influxd config > /etc/influxdb.toml + +# Use /data for all disk storage +RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml + +VOLUME ["/data"] + +ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 new file mode 100644 index 000000000..caaf81dc9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 @@ -0,0 +1,12 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && apt-get install -y python-software-properties software-properties-common git +RUN add-apt-repository ppa:evarlast/golang1.5 +RUN apt-get update && apt-get install -y -o 
Dpkg::Options::="--force-overwrite" golang-go + +ENV GOPATH=/root/go +RUN mkdir -p /root/go/src/github.com/influxdb/influxdb +RUN mkdir -p /tmp/artifacts + +VOLUME /root/go/src/github.com/influxdb/influxdb +VOLUME /tmp/artifacts diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE new file mode 100644 index 000000000..d50222706 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2015 Errplane Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md new file mode 100644 index 000000000..abba2b241 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -0,0 +1,19 @@ +# List +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) +- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) +- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) +- glyphicons [LICENSE](http://glyphicons.com/license/) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) +- golang.org/x/crypto/bcrypt [BSD LICENSE](https://go.googlesource.com/crypto/+/master/LICENSE) + diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md new file mode 100644 index 000000000..8491aa7ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md @@ -0,0 +1,180 @@ +The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field name, field value, tag name, or tag value appears it should be wrapped in double quotes. + +# Databases & retention policies + +```sql +-- create a database +CREATE DATABASE + +-- create a retention policy +CREATE RETENTION POLICY ON DURATION REPLICATION [DEFAULT] + +-- alter retention policy +ALTER RETENTION POLICY ON (DURATION | REPLICATION | DEFAULT)+ + +-- drop a database +DROP DATABASE + +-- drop a retention policy +DROP RETENTION POLICY ON +``` +where `` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `` must be an integer. + +If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads. + +# Users and permissions + +```sql +-- create user +CREATE USER WITH PASSWORD '' + +-- grant privilege on a database +GRANT ON TO + +-- grant cluster admin privileges +GRANT ALL [PRIVILEGES] TO + +-- revoke privilege +REVOKE ON FROM + +-- revoke all privileges for a DB +REVOKE ALL [PRIVILEGES] ON FROM + +-- revoke all privileges including cluster admin +REVOKE ALL [PRIVILEGES] FROM + +-- combine db creation with privilege assignment (user must already exist) +CREATE DATABASE GRANT TO +CREATE DATABASE REVOKE FROM + +-- delete a user +DROP USER + + +``` +where ` := READ | WRITE | All `. 
+ +Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. + +By default, newly created users have no privileges to any databases. + +Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. + +# Select + +```sql +SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) + +SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region +``` + +## Group By + +# Delete + +# Series + +## Destroy + +```sql +DROP MEASUREMENT +DROP MEASUREMENT cpu WHERE region = 'uswest' +``` + +## Show + +Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. + +```sql +-- show all databases +SHOW DATABASES + +-- show measurement names +SHOW MEASUREMENTS +SHOW MEASUREMENTS LIMIT 15 +SHOW MEASUREMENTS LIMIT 10 OFFSET 40 +SHOW MEASUREMENTS WHERE service = 'redis' +-- LIMIT and OFFSET can be applied to any of the SHOW type queries + +-- show all series across all measurements/tagsets +SHOW SERIES + +-- get a show of all series for any measurements where tag key region = tag value 'uswest' +SHOW SERIES WHERE region = 'uswest' + +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 + +-- returns the 100 - 109 rows in the result. In the case of SHOW SERIES, which returns +-- series split into measurements. Each series counts as a row. So you could see only a +-- single measurement returned, but 10 series within it. +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 + +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb + +-- get a show of all tag keys across all measurements +SHOW TAG KEYS + +-- show all the tag keys for a given measurement +SHOW TAG KEYS FROM cpu +SHOW TAG KEYS FROM temperature, wind_speed + +-- show all the tag values. 
note that a single WHERE TAG KEY = '...' clause is required +SHOW TAG VALUES WITH TAG KEY = 'region' +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' + +-- and you can do stuff against fields +SHOW FIELD KEYS FROM cpu + +-- but you can't do this +SHOW FIELD VALUES +-- we don't index field values, so this query should be invalid. + +-- show all users +SHOW USERS +``` + +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. + +And the show series output looks like this: + +```json +[ + { + "name": "cpu", + "columns": ["id", "region", "host"], + "values": [ + 1, "uswest", "servera", + 2, "uswest", "serverb" + ] + }, + { + "name": "reponse_time", + "columns": ["id", "application", "host"], + "values": [ + 3, "myRailsApp", "servera" + ] + } +] +``` + +# Continuous Queries + +Continous queries are going to be inspired by MySQL `TRIGGER` syntax: + +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html + +Instead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention, +particularly in the case where creation is scripted. + +## Create + + CREATE CONTINUOUS QUERY AS SELECT ... FROM ... + +## Destroy + + DROP CONTINUOUS QUERY + +## List + + SHOW CONTINUOUS QUERIES diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md new file mode 100644 index 000000000..586b5613d --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md @@ -0,0 +1,72 @@ +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdb/influxdb/tree/master) + +## An Open-Source, Distributed, Time Series Database + +> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. 
However, 0.9.0 clustering should be considered an alpha release. + +InfluxDB is an open source **distributed time series database** with +**no external dependencies**. It's useful for recording metrics, +events, and performing analytics. + +## Features + +* Built-in [HTTP API](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html) so you don't have to write any server side code to get up and running. +* Data can be tagged, allowing very flexible querying. +* SQL-like query language. +* Clustering is supported out of the box, so that you can scale horizontally to handle your data. +* Simple to install and manage, and fast to get data in and out. +* It aims to answer queries in real-time. That means every data point is + indexed as it comes in and is immediately available in queries that + should return in < 100ms. + +## Getting Started +*The following directions apply only to the 0.9.0 release or building from the source on master.* + +### Building + +You don't need to build the project to use it - you can use any of our +[pre-built packages](http://influxdb.com/download/index.html) to install InfluxDB. That's +the recommended way to get it running. However, if you want to contribute to the core of InfluxDB, you'll need to build. +For those adventurous enough, you can +[follow along on our docs](http://github.com/influxdb/influxdb/blob/master/CONTRIBUTING.md). + +### Starting InfluxDB +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. 
+ +### Creating your first database + +``` +curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Insert some data +``` +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' +``` + +### Query for the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now - 1d" +``` + +### Analyze the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" +``` + +## Helpful Links + +* Understand the [design goals and motivations of the project](http://influxdb.com/docs/v0.9/introduction/overview.html). +* Follow the [getting started guide](http://influxdb.com/docs/v0.9/introduction/getting_started.html) to find out how to install InfluxDB, start writing more data, and issue more queries - in just a few minutes. +* See the [HTTP API documentation to start writing a library for your favorite language](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html). 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go new file mode 100644 index 000000000..25abbf6f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go @@ -0,0 +1,78 @@ +package influxdb + +import ( + "math/rand" + + "github.com/influxdb/influxdb/meta" +) + +// Balancer represents a load-balancing algorithm for a set of nodes +type Balancer interface { + // Next returns the next Node according to the balancing method + // or nil if there are no nodes available + Next() *meta.NodeInfo +} + +type nodeBalancer struct { + nodes []meta.NodeInfo // data nodes to balance between + p int // current node index +} + +// NewNodeBalancer create a shuffled, round-robin balancer so that +// multiple instances will return nodes in randomized order and each +// each returned node will be repeated in a cycle +func NewNodeBalancer(nodes []meta.NodeInfo) Balancer { + // make a copy of the node slice so we can randomize it + // without affecting the original instance as well as ensure + // that each Balancer returns nodes in a different order + b := &nodeBalancer{} + + b.nodes = make([]meta.NodeInfo, len(nodes)) + copy(b.nodes, nodes) + + b.shuffle() + return b +} + +// shuffle randomizes the ordering the balancers available nodes +func (b *nodeBalancer) shuffle() { + for i := range b.nodes { + j := rand.Intn(i + 1) + b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i] + } +} + +// online returns a slice of the nodes that are online +func (b *nodeBalancer) online() []meta.NodeInfo { + return b.nodes + // now := time.Now().UTC() + // up := []meta.NodeInfo{} + // for _, n := range b.nodes { + // if n.OfflineUntil.After(now) { + // continue + // } + // up = append(up, n) + // } + // return up +} + +// Next returns the next available nodes +func (b *nodeBalancer) Next() *meta.NodeInfo { + // only use online nodes + up := b.online() + + // no nodes online + if len(up) == 0 { 
+ return nil + } + + // rollover back to the beginning + if b.p >= len(up) { + b.p = 0 + } + + d := &up[b.p] + b.p += 1 + + return d +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go new file mode 100644 index 000000000..ca1942c33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go @@ -0,0 +1,115 @@ +package influxdb_test + +import ( + "fmt" + "testing" + + "github.com/influxdb/influxdb" + "github.com/influxdb/influxdb/meta" +) + +func NewNodes() []meta.NodeInfo { + var nodes []meta.NodeInfo + for i := 1; i <= 2; i++ { + nodes = append(nodes, meta.NodeInfo{ + ID: uint64(i), + Host: fmt.Sprintf("localhost:999%d", i), + }) + } + return nodes +} + +func TestBalancerEmptyNodes(t *testing.T) { + b := influxdb.NewNodeBalancer([]meta.NodeInfo{}) + got := b.Next() + if got != nil { + t.Errorf("expected nil, got %v", got) + } +} + +func TestBalancerUp(t *testing.T) { + nodes := NewNodes() + b := influxdb.NewNodeBalancer(nodes) + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node in randomized round-robin order + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Should never get the same node in order twice + if first.ID == second.ID { + t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID) + } +} + +/* +func TestBalancerDown(t *testing.T) { + nodes := NewNodes() + b := influxdb.NewNodeBalancer(nodes) + + nodes[0].Down() + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node should rollover to the first up node + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Health node should be returned each time + if first.ID != 2 && first.ID != second.ID { + t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) + } +} +*/ + +/* +func TestBalancerBackUp(t *testing.T) { + nodes := newDataNodes() + b := influxdb.NewNodeBalancer(nodes) + + nodes[0].Down() + + for i := 0; i < 3; i++ { + got := b.Next() + if got == nil { + t.Errorf("expected datanode, got %v", got) + } + + if exp := uint64(2); got.ID != exp { + t.Errorf("wrong node id: exp %v, got %v", exp, got.ID) + } + } + + nodes[0].Up() + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node should rollover to the first up node + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Should get both nodes returned + if first.ID == second.ID { + t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID) + } +} +*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh new file mode 100644 index 000000000..0dea62d2a --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +set -e -x + +GO_VER=${GO_VER:-1.5} + +docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' + +docker build -t influxdb . diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh new file mode 100644 index 000000000..092582c44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh @@ -0,0 +1,95 @@ +#!/bin/bash +# +# This is the InfluxDB CircleCI test script. Using this script allows total control +# the environment in which the build and test is run, and matches the official +# build process for InfluxDB. + +BUILD_DIR=$HOME/influxdb-build +GO_VERSION=go1.5 +PARALLELISM="-parallel 256" +TIMEOUT="-timeout 480s" + +# Executes the given statement, and exits if the command returns a non-zero code. +function exit_if_fail { + command=$@ + echo "Executing '$command'" + $command + rc=$? + if [ $rc -ne 0 ]; then + echo "'$command' returned $rc." + exit $rc + fi +} + +# Check that go fmt has been run. +function check_go_fmt { + fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l` + if [ $fmtcount -gt 0 ]; then + echo "run 'go fmt ./...' to format your source code." + exit 1 + fi +} + +# Check that go vet passes. 
+function check_go_vet { + # Due to the way composites work, vet will fail for some of our tests so we ignore it + vetcount=`go tool vet --composites=false ./ 2>&1 | wc -l` + if [ $vetcount -gt 0 ]; then + echo "run 'go tool vet --composites=false ./' to see the errors it flags and correct your source code." + exit 1 + fi +} + +source $HOME/.gvm/scripts/gvm +exit_if_fail gvm use $GO_VERSION + +# Set up the build directory, and then GOPATH. +exit_if_fail mkdir $BUILD_DIR +export GOPATH=$BUILD_DIR +exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb + +# Dump some test config to the log. +echo "Test configuration" +echo "========================================" +echo "\$HOME: $HOME" +echo "\$GOPATH: $GOPATH" +echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH" + +# Move the checked-out source to a better location. +exit_if_fail mv $HOME/influxdb $GOPATH/src/github.com/influxdb +exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb +exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH + +# Install the code. +exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb +exit_if_fail go get -t -d -v ./... +exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back. +check_go_fmt +check_go_vet +exit_if_fail go build -v ./... + +# Run the tests. +case $CIRCLE_NODE_INDEX in + 0) + go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt + rc=${PIPESTATUS[0]} + ;; + 1) + # 32bit tests. + if [[ -e ~/docker/image.tar ]]; then docker load -i ~/docker/image.tar; fi + docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . + mkdir -p ~/docker; docker save ubuntu-32-influxdb-test > ~/docker/image.tar + exit_if_fail docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . 
+ docker run -v $(pwd):/root/go/src/github.com/influxdb/influxdb -e "CI=${CI}" \ + -v ${CIRCLE_ARTIFACTS}:/tmp/artifacts \ + -t ubuntu-32-influxdb-test bash \ + -c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test ${PARALLELISM} ${TIMEOUT} -v ./... 2>&1 | tee /tmp/artifacts/test_logs_i386.txt && exit \${PIPESTATUS[0]}" + rc=$? + ;; + 2) + GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt + rc=${PIPESTATUS[0]} + ;; +esac + +exit $rc diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml new file mode 100644 index 000000000..01a5a161a --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml @@ -0,0 +1,16 @@ +machine: + services: + - docker + pre: + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) + - source $HOME/.gvm/scripts/gvm; gvm install go1.5 --binary + +dependencies: + override: + - mkdir -p ~/docker + cache_directories: + - "~/docker" +test: + override: + - bash circle-test.sh: + parallel: true diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md index 883941e90..8d94a7579 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md @@ -45,7 +45,12 @@ the configuration below. 
package main import "github.com/influxdb/influxdb/client" -import "net/url" +import ( + "net/url" + "fmt" + "log" + "os" +) const ( MyHost = "localhost" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go index c4f34d84c..2100fb920 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go @@ -79,6 +79,7 @@ type Config struct { Password string UserAgent string Timeout time.Duration + Precision string } // NewConfig will create a config to be used in connecting to the client @@ -95,6 +96,7 @@ type Client struct { password string httpClient *http.Client userAgent string + precision string } const ( @@ -112,6 +114,7 @@ func NewClient(c Config) (*Client, error) { password: c.Password, httpClient: &http.Client{Timeout: c.Timeout}, userAgent: c.UserAgent, + precision: c.Precision, } if client.userAgent == "" { client.userAgent = "InfluxDBClient" @@ -125,6 +128,11 @@ func (c *Client) SetAuth(u, p string) { c.password = p } +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + // Query sends a command to the server and returns the Response func (c *Client) Query(q Query) (*Response, error) { u := c.url @@ -133,6 +141,9 @@ func (c *Client) Query(q Query) (*Response, error) { values := u.Query() values.Set("q", q.Command) values.Set("db", q.Database) + if c.precision != "" { + values.Set("epoch", c.precision) + } u.RawQuery = values.Encode() req, err := http.NewRequest("GET", u.String(), nil) @@ -449,7 +460,11 @@ func (p *Point) MarshalJSON() ([]byte, error) { } func (p *Point) MarshalString() string { - return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String() + pt := tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time) + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + 
return pt.String() + } + return pt.PrecisionString(p.Precision) } // UnmarshalJSON decodes the data into the Point struct diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go index 0a6df042e..34cedc446 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go @@ -498,13 +498,12 @@ func TestBatchPoints_Normal(t *testing.T) { } func TestClient_Timeout(t *testing.T) { + done := make(chan bool) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(1 * time.Second) - var data client.Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) + <-done })) defer ts.Close() + defer func() { done <- true }() u, _ := url.Parse(ts.URL) config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} @@ -517,13 +516,33 @@ func TestClient_Timeout(t *testing.T) { _, err = c.Query(query) if err == nil { t.Fatalf("unexpected success. expected timeout error") - } else if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("unexpected error. expected 'use of closed network connection' error, got %v", err) + } else if !strings.Contains(err.Error(), "request canceled") && + !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. 
expected 'request canceled' error, got %v", err) + } +} + +func TestClient_NoTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } - confignotimeout := client.Config{URL: *u} - cnotimeout, err := client.NewClient(confignotimeout) - _, err = cnotimeout.Query(query) + query := client.Query{} + _, err = c.Query(query) if err != nil { t.Fatalf("unexpected error. expected %v, actual %v", nil, err) } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go new file mode 100644 index 000000000..fed7e18e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "net" + "sync" + + "gopkg.in/fatih/pool.v2" +) + +type clientPool struct { + mu sync.RWMutex + pool map[uint64]pool.Pool +} + +func newClientPool() *clientPool { + return &clientPool{ + pool: make(map[uint64]pool.Pool), + } +} + +func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { + c.mu.Lock() + c.pool[nodeID] = p + c.mu.Unlock() +} + +func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { + c.mu.RLock() + p, ok := c.pool[nodeID] + c.mu.RUnlock() + return p, ok +} + +func (c *clientPool) size() int { + c.mu.RLock() + var size int + for _, p := range c.pool { + size += p.Len() + } + c.mu.RUnlock() + return size +} + +func (c *clientPool) conn(nodeID uint64) (net.Conn, error) { + c.mu.RLock() + conn, err := c.pool[nodeID].Get() + c.mu.RUnlock() + return conn, 
err +} + +func (c *clientPool) close() { + c.mu.Lock() + for _, p := range c.pool { + p.Close() + } + c.mu.Unlock() +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go new file mode 100644 index 000000000..3a67b32d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go @@ -0,0 +1,35 @@ +package cluster + +import ( + "time" + + "github.com/influxdb/influxdb/toml" +) + +const ( + // DefaultWriteTimeout is the default timeout for a complete write to succeed. + DefaultWriteTimeout = 5 * time.Second + + // DefaultShardWriterTimeout is the default timeout set on shard writers. + DefaultShardWriterTimeout = 5 * time.Second + + // DefaultShardMapperTimeout is the default timeout set on shard mappers. + DefaultShardMapperTimeout = 5 * time.Second +) + +// Config represents the configuration for the clustering service. +type Config struct { + ForceRemoteShardMapping bool `toml:"force-remote-mapping"` + WriteTimeout toml.Duration `toml:"write-timeout"` + ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"` + ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"` +} + +// NewConfig returns an instance of Config with defaults. 
+func NewConfig() Config { + return Config{ + WriteTimeout: toml.Duration(DefaultWriteTimeout), + ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout), + ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout), + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go new file mode 100644 index 000000000..db5e5ddc1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go @@ -0,0 +1,27 @@ +package cluster_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/cluster" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c cluster.Config + if _, err := toml.Decode(` +shard-writer-timeout = "10s" +write-timeout = "20s" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if time.Duration(c.ShardWriterTimeout) != 10*time.Second { + t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout) + } else if time.Duration(c.WriteTimeout) != 20*time.Second { + t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go new file mode 100644 index 000000000..4fa3f34b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go @@ -0,0 +1,155 @@ +// Code generated by protoc-gen-gogo. +// source: internal/data.proto +// DO NOT EDIT! + +/* +Package internal is a generated protocol buffer package. 
+ +It is generated from these files: + internal/data.proto + +It has these top-level messages: + WriteShardRequest + WriteShardResponse + MapShardRequest + MapShardResponse +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type WriteShardRequest struct { + ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"` + Points [][]byte `protobuf:"bytes,2,rep" json:"Points,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} } +func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) } +func (*WriteShardRequest) ProtoMessage() {} + +func (m *WriteShardRequest) GetShardID() uint64 { + if m != nil && m.ShardID != nil { + return *m.ShardID + } + return 0 +} + +func (m *WriteShardRequest) GetPoints() [][]byte { + if m != nil { + return m.Points + } + return nil +} + +type WriteShardResponse struct { + Code *int32 `protobuf:"varint,1,req" json:"Code,omitempty"` + Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} } +func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) } +func (*WriteShardResponse) ProtoMessage() {} + +func (m *WriteShardResponse) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *WriteShardResponse) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type MapShardRequest struct { + ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"` + Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"` + ChunkSize *int32 `protobuf:"varint,3,req" json:"ChunkSize,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MapShardRequest) Reset() { *m = 
MapShardRequest{} } +func (m *MapShardRequest) String() string { return proto.CompactTextString(m) } +func (*MapShardRequest) ProtoMessage() {} + +func (m *MapShardRequest) GetShardID() uint64 { + if m != nil && m.ShardID != nil { + return *m.ShardID + } + return 0 +} + +func (m *MapShardRequest) GetQuery() string { + if m != nil && m.Query != nil { + return *m.Query + } + return "" +} + +func (m *MapShardRequest) GetChunkSize() int32 { + if m != nil && m.ChunkSize != nil { + return *m.ChunkSize + } + return 0 +} + +type MapShardResponse struct { + Code *int32 `protobuf:"varint,1,req" json:"Code,omitempty"` + Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"` + Data []byte `protobuf:"bytes,3,opt" json:"Data,omitempty"` + TagSets []string `protobuf:"bytes,4,rep" json:"TagSets,omitempty"` + Fields []string `protobuf:"bytes,5,rep" json:"Fields,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MapShardResponse) Reset() { *m = MapShardResponse{} } +func (m *MapShardResponse) String() string { return proto.CompactTextString(m) } +func (*MapShardResponse) ProtoMessage() {} + +func (m *MapShardResponse) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *MapShardResponse) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func (m *MapShardResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *MapShardResponse) GetTagSets() []string { + if m != nil { + return m.TagSets + } + return nil +} + +func (m *MapShardResponse) GetFields() []string { + if m != nil { + return m.Fields + } + return nil +} + +func init() { +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto new file mode 100644 index 000000000..fed14bad9 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto @@ -0,0 +1,25 @@ +package internal; + +message WriteShardRequest { + required uint64 ShardID = 1; + repeated bytes Points = 2; +} + +message WriteShardResponse { + required int32 Code = 1; + optional string Message = 2; +} + +message MapShardRequest { + required uint64 ShardID = 1; + required string Query = 2; + required int32 ChunkSize = 3; +} + +message MapShardResponse { + required int32 Code = 1; + optional string Message = 2; + optional bytes Data = 3; + repeated string TagSets = 4; + repeated string Fields = 5; +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go new file mode 100644 index 000000000..157b80d0a --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go @@ -0,0 +1,346 @@ +package cluster + +import ( + "errors" + "expvar" + "fmt" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/influxdb/influxdb" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful +type ConsistencyLevel int + +// The statistics generated by the "write" mdoule +const ( + statWriteReq = "req" + statPointWriteReq = "point_req" + statPointWriteReqLocal = "point_req_local" + statPointWriteReqRemote = "point_req_remote" + statWriteOK = "write_ok" + statWritePartial = "write_partial" + statWriteTimeout = "write_timeout" + statWriteErr = "write_error" + statWritePointReqHH = "point_req_hh" +) + +const ( + // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write + ConsistencyLevelOne + + // ConsistencyLevelOne requires a quorum of data nodes to acknowledge 
a write + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write + ConsistencyLevelAll +) + +var ( + // ErrTimeout is returned when a write times out. + ErrTimeout = errors.New("timeout") + + // ErrPartialWrite is returned when a write partially succeeds but does + // not meet the requested consistency level. + ErrPartialWrite = errors.New("partial write") + + // ErrWriteFailed is returned when no writes succeeded. + ErrWriteFailed = errors.New("write failed") + + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} + +// PointsWriter handles writes across multiple local and remote data nodes. 
+type PointsWriter struct { + mu sync.RWMutex + closing chan struct{} + WriteTimeout time.Duration + Logger *log.Logger + + MetaStore interface { + NodeID() uint64 + Database(name string) (di *meta.DatabaseInfo, err error) + RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) + } + + TSDBStore interface { + CreateShard(database, retentionPolicy string, shardID uint64) error + WriteToShard(shardID uint64, points []tsdb.Point) error + } + + ShardWriter interface { + WriteShard(shardID, ownerID uint64, points []tsdb.Point) error + } + + HintedHandoff interface { + WriteShard(shardID, ownerID uint64, points []tsdb.Point) error + } + + statMap *expvar.Map +} + +// NewPointsWriter returns a new instance of PointsWriter for a node. +func NewPointsWriter() *PointsWriter { + return &PointsWriter{ + closing: make(chan struct{}), + WriteTimeout: DefaultWriteTimeout, + Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), + statMap: influxdb.NewStatistics("write", "write", nil), + } +} + +// ShardMapping contains a mapping of a shards to a points. 
+type ShardMapping struct { + Points map[uint64][]tsdb.Point // The points associated with a shard ID + Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID +} + +// NewShardMapping creates an empty ShardMapping +func NewShardMapping() *ShardMapping { + return &ShardMapping{ + Points: map[uint64][]tsdb.Point{}, + Shards: map[uint64]*meta.ShardInfo{}, + } +} + +// MapPoint maps a point to shard +func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p tsdb.Point) { + points, ok := s.Points[shardInfo.ID] + if !ok { + s.Points[shardInfo.ID] = []tsdb.Point{p} + } else { + s.Points[shardInfo.ID] = append(points, p) + } + s.Shards[shardInfo.ID] = shardInfo +} + +func (w *PointsWriter) Open() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing == nil { + w.closing = make(chan struct{}) + } + return nil +} + +func (w *PointsWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing != nil { + close(w.closing) + w.closing = nil + } + return nil +} + +// MapShards maps the points contained in wp to a ShardMapping. If a point +// maps to a shard group or shard that does not currently exist, it will be +// created before returning the mapping. 
+func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { + + // holds the start time ranges for required shard groups + timeRanges := map[time.Time]*meta.ShardGroupInfo{} + + rp, err := w.MetaStore.RetentionPolicy(wp.Database, wp.RetentionPolicy) + if err != nil { + return nil, err + } + + for _, p := range wp.Points { + timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil + } + + // holds all the shard groups and shards that are required for writes + for t := range timeRanges { + sg, err := w.MetaStore.CreateShardGroupIfNotExists(wp.Database, wp.RetentionPolicy, t) + if err != nil { + return nil, err + } + timeRanges[t] = sg + } + + mapping := NewShardMapping() + for _, p := range wp.Points { + sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] + sh := sg.ShardFor(p.HashID()) + mapping.MapPoint(&sh, p) + } + return mapping, nil +} + +// WritePoints writes across multiple local and remote data nodes according the consistency level. +func (w *PointsWriter) WritePoints(p *WritePointsRequest) error { + w.statMap.Add(statWriteReq, 1) + w.statMap.Add(statPointWriteReq, int64(len(p.Points))) + + if p.RetentionPolicy == "" { + db, err := w.MetaStore.Database(p.Database) + if err != nil { + return err + } else if db == nil { + return influxdb.ErrDatabaseNotFound(p.Database) + } + p.RetentionPolicy = db.DefaultRetentionPolicy + } + + shardMappings, err := w.MapShards(p) + if err != nil { + return err + } + + // Write each shard in it's own goroutine and return as soon + // as one fails. 
+ ch := make(chan error, len(shardMappings.Points)) + for shardID, points := range shardMappings.Points { + go func(shard *meta.ShardInfo, database, retentionPolicy string, points []tsdb.Point) { + ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points) + }(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points) + } + + for range shardMappings.Points { + select { + case <-w.closing: + return ErrWriteFailed + case err := <-ch: + if err != nil { + return err + } + } + } + return nil +} + +// writeToShards writes points to a shard and ensures a write consistency level has been met. If the write +// partially succeeds, ErrPartialWrite is returned. +func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, + consistency ConsistencyLevel, points []tsdb.Point) error { + // The required number of writes to achieve the requested consistency level + required := len(shard.Owners) + switch consistency { + case ConsistencyLevelAny, ConsistencyLevelOne: + required = 1 + case ConsistencyLevelQuorum: + required = required/2 + 1 + } + + // response channel for each shard writer go routine + type AsyncWriteResult struct { + Owner meta.ShardOwner + Err error + } + ch := make(chan *AsyncWriteResult, len(shard.Owners)) + + for _, owner := range shard.Owners { + go func(shardID uint64, owner meta.ShardOwner, points []tsdb.Point) { + if w.MetaStore.NodeID() == owner.NodeID { + w.statMap.Add(statPointWriteReqLocal, int64(len(points))) + + err := w.TSDBStore.WriteToShard(shardID, points) + // If we've written to shard that should exist on the current node, but the store has + // not actually created this shard, tell it to create it and retry the write + if err == tsdb.ErrShardNotFound { + err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID) + if err != nil { + ch <- &AsyncWriteResult{owner, err} + return + } + err = w.TSDBStore.WriteToShard(shardID, points) + } + ch <- &AsyncWriteResult{owner, err} 
+ return + } + + w.statMap.Add(statPointWriteReqRemote, int64(len(points))) + err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points) + if err != nil && tsdb.IsRetryable(err) { + // The remote write failed so queue it via hinted handoff + w.statMap.Add(statWritePointReqHH, int64(len(points))) + hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points) + + // If the write consistency level is ANY, then a successful hinted handoff can + // be considered a successful write so send nil to the response channel + // otherwise, let the original error propogate to the response channel + if hherr == nil && consistency == ConsistencyLevelAny { + ch <- &AsyncWriteResult{owner, nil} + return + } + } + ch <- &AsyncWriteResult{owner, err} + + }(shard.ID, owner, points) + } + + var wrote int + timeout := time.After(w.WriteTimeout) + var writeError error + for range shard.Owners { + select { + case <-w.closing: + return ErrWriteFailed + case <-timeout: + w.statMap.Add(statWriteTimeout, 1) + // return timeout error to caller + return ErrTimeout + case result := <-ch: + // If the write returned an error, continue to the next response + if result.Err != nil { + w.statMap.Add(statWriteErr, 1) + w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, result.Owner.NodeID, result.Err) + + // Keep track of the first error we see to return back to the client + if writeError == nil { + writeError = result.Err + } + continue + } + + wrote += 1 + + // We wrote the required consistency level + if wrote >= required { + w.statMap.Add(statWriteOK, 1) + return nil + } + } + } + + if wrote > 0 { + w.statMap.Add(statWritePartial, 1) + return ErrPartialWrite + } + + if writeError != nil { + return fmt.Errorf("write failed: %v", writeError) + } + + return ErrWriteFailed +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go new file mode 100644 
index 000000000..902a1d1de --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go @@ -0,0 +1,464 @@ +package cluster_test + +import ( + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" +) + +// Ensures the points writer maps a single point to a single shard. +func TestPointsWriter_MapShards_One(t *testing.T) { + ms := MetaStore{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := cluster.PointsWriter{MetaStore: ms} + pr := &cluster.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + ConsistencyLevel: cluster.ConsistencyLevelOne, + } + pr.AddPoint("cpu", 1.0, time.Now(), nil) + + var ( + shardMappings *cluster.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 1; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } +} + +// Ensures the points writer maps a multiple points across shard group boundaries. 
+func TestPointsWriter_MapShards_Multiple(t *testing.T) { + ms := MetaStore{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + + c := cluster.PointsWriter{MetaStore: ms} + pr := &cluster.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + ConsistencyLevel: cluster.ConsistencyLevelOne, + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) + pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + var ( + shardMappings *cluster.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 2; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } + + for _, points := range shardMappings.Points { + // First shard shoud have 1 point w/ first point added + if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { + t.Fatalf("MapShards() value mismatch. 
got %v, exp %v", points[0].Time(), pr.Points[0].Time()) + } + + // Second shard shoud have the last two points added + if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) + } + + if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) + } + } +} + +func TestPointsWriter_WritePoints(t *testing.T) { + tests := []struct { + name string + database string + retentionPolicy string + consistency cluster.ConsistencyLevel + + // the responses returned by each shard write call. node ID 1 = pos 0 + err []error + expErr error + }{ + // Consistency one + { + name: "write one success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{nil, nil, nil}, + expErr: nil, + }, + { + name: "write one error", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: fmt.Errorf("write failed: a failure"), + }, + + // Consistency any + { + name: "write any success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAny, + err: []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")}, + expErr: nil, + }, + // Consistency all + { + name: "write all success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + err: []error{nil, nil, nil}, + expErr: nil, + }, + { + name: "write all, 2/3, partial write", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + err: []error{nil, fmt.Errorf("a failure"), nil}, + expErr: cluster.ErrPartialWrite, + }, + { + name: "write all, 1/3 (failure)", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + 
err: []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: cluster.ErrPartialWrite, + }, + + // Consistency quorum + { + name: "write quorum, 1/3 failure", + consistency: cluster.ConsistencyLevelQuorum, + database: "mydb", + retentionPolicy: "myrp", + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil}, + expErr: cluster.ErrPartialWrite, + }, + { + name: "write quorum, 2/3 success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelQuorum, + err: []error{nil, nil, fmt.Errorf("a failure")}, + expErr: nil, + }, + { + name: "write quorum, 3/3 success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelQuorum, + err: []error{nil, nil, nil}, + expErr: nil, + }, + + // Error write error + { + name: "no writes succeed", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: fmt.Errorf("write failed: a failure"), + }, + + // Hinted handoff w/ ANY + { + name: "hinted handoff write succeed", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAny, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: nil, + }, + + // Write to non-existant database + { + name: "write to non-existant database", + database: "doesnt_exist", + retentionPolicy: "", + consistency: cluster.ConsistencyLevelAny, + err: []error{nil, nil, nil}, + expErr: fmt.Errorf("database not found: doesnt_exist"), + }, + } + + for _, test := range tests { + + pr := &cluster.WritePointsRequest{ + Database: test.database, + RetentionPolicy: test.retentionPolicy, + ConsistencyLevel: test.consistency, + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) + pr.AddPoint("cpu", 2.0, time.Unix(0, 
0).Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + // copy to prevent data race + theTest := test + sm := cluster.NewShardMapping() + sm.MapPoint( + &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[0]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[1]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[2]) + + // Local cluster.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + sw := &fakeShardWriter{ + ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[int(nodeID)-1] + }, + } + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []tsdb.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[0] + }, + } + + hh := &fakeShardWriter{ + ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error { + return nil + }, + } + + ms := NewMetaStore() + ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) { + return nil, nil + } + ms.NodeIDFn = func() uint64 { return 1 } + c := cluster.NewPointsWriter() + c.MetaStore = ms + c.ShardWriter = sw + c.TSDBStore = store + c.HintedHandoff = hh + + err := c.WritePoints(pr) + if err == nil && test.expErr != nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + + if err != nil && test.expErr == nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + } +} + +var shardID uint64 
+ +type fakeShardWriter struct { + ShardWriteFn func(shardID, nodeID uint64, points []tsdb.Point) error +} + +func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []tsdb.Point) error { + return f.ShardWriteFn(shardID, nodeID, points) +} + +type fakeStore struct { + WriteFn func(shardID uint64, points []tsdb.Point) error + CreateShardfn func(database, retentionPolicy string, shardID uint64) error +} + +func (f *fakeStore) WriteToShard(shardID uint64, points []tsdb.Point) error { + return f.WriteFn(shardID, points) +} + +func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error { + return f.CreateShardfn(database, retentionPolicy, shardID) +} + +func NewMetaStore() *MetaStore { + ms := &MetaStore{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + return ms +} + +type MetaStore struct { + NodeIDFn func() uint64 + RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + DatabaseFn func(database string) (*meta.DatabaseInfo, error) + ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) +} + +func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() } + +func (m MetaStore) RetentionPolicy(database, name string) 
(*meta.RetentionPolicyInfo, error) { + return m.RetentionPolicyFn(database, name) +} + +func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) +} + +func (m MetaStore) Database(database string) (*meta.DatabaseInfo, error) { + return m.DatabaseFn(database) +} + +func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { + return m.ShardOwnerFn(shardID) +} + +func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { + shards := []meta.ShardInfo{} + owners := []meta.ShardOwner{} + for i := 1; i <= nodeCount; i++ { + owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) + } + + // each node is fully replicated with each other + shards = append(shards, meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }) + + rp := &meta.RetentionPolicyInfo{ + Name: "myrp", + ReplicaN: nodeCount, + Duration: duration, + ShardGroupDuration: duration, + ShardGroups: []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + ID: nextShardID(), + StartTime: time.Unix(0, 0), + EndTime: time.Unix(0, 0).Add(duration).Add(-1), + Shards: shards, + }, + }, + } + return rp +} + +func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { + var startTime, endTime time.Time + if len(rp.ShardGroups) == 0 { + startTime = time.Unix(0, 0) + } else { + startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) + } + endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) + + sh := meta.ShardGroupInfo{ + ID: uint64(len(rp.ShardGroups) + 1), + StartTime: startTime, + EndTime: endTime, + Shards: []meta.ShardInfo{ + meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }, + }, + } + rp.ShardGroups = append(rp.ShardGroups, sh) +} + +func nextShardID() uint64 { + return atomic.AddUint64(&shardID, 1) +} diff --git 
a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go new file mode 100644 index 000000000..09dcc4a1c --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go @@ -0,0 +1,164 @@ +package cluster + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdb/influxdb/cluster/internal" + "github.com/influxdb/influxdb/tsdb" +) + +//go:generate protoc --gogo_out=. internal/data.proto + +// MapShardRequest represents the request to map a remote shard for a query. +type MapShardRequest struct { + pb internal.MapShardRequest +} + +func (m *MapShardRequest) ShardID() uint64 { return m.pb.GetShardID() } +func (m *MapShardRequest) Query() string { return m.pb.GetQuery() } +func (m *MapShardRequest) ChunkSize() int32 { return m.pb.GetChunkSize() } + +func (m *MapShardRequest) SetShardID(id uint64) { m.pb.ShardID = &id } +func (m *MapShardRequest) SetQuery(query string) { m.pb.Query = &query } +func (m *MapShardRequest) SetChunkSize(chunkSize int32) { m.pb.ChunkSize = &chunkSize } + +// MarshalBinary encodes the object to a binary format. +func (m *MapShardRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(&m.pb) +} + +// UnmarshalBinary populates MapShardRequest from a binary format. 
+func (m *MapShardRequest) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &m.pb); err != nil { + return err + } + return nil +} + +// MapShardResponse represents the response returned from a remote MapShardRequest call +type MapShardResponse struct { + pb internal.MapShardResponse +} + +func NewMapShardResponse(code int, message string) *MapShardResponse { + m := &MapShardResponse{} + m.SetCode(code) + m.SetMessage(message) + return m +} + +func (r *MapShardResponse) Code() int { return int(r.pb.GetCode()) } +func (r *MapShardResponse) Message() string { return r.pb.GetMessage() } +func (r *MapShardResponse) TagSets() []string { return r.pb.GetTagSets() } +func (r *MapShardResponse) Fields() []string { return r.pb.GetFields() } +func (r *MapShardResponse) Data() []byte { return r.pb.GetData() } + +func (r *MapShardResponse) SetCode(code int) { r.pb.Code = proto.Int32(int32(code)) } +func (r *MapShardResponse) SetMessage(message string) { r.pb.Message = &message } +func (r *MapShardResponse) SetTagSets(tagsets []string) { r.pb.TagSets = tagsets } +func (r *MapShardResponse) SetFields(fields []string) { r.pb.Fields = fields } +func (r *MapShardResponse) SetData(data []byte) { r.pb.Data = data } + +// MarshalBinary encodes the object to a binary format. +func (r *MapShardResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(&r.pb) +} + +// UnmarshalBinary populates WritePointRequest from a binary format. 
+func (r *MapShardResponse) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &r.pb); err != nil { + return err + } + return nil +} + +// WritePointsRequest represents a request to write point data to the cluster +type WritePointsRequest struct { + Database string + RetentionPolicy string + ConsistencyLevel ConsistencyLevel + Points []tsdb.Point +} + +// AddPoint adds a point to the WritePointRequest with field name 'value' +func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + w.Points = append(w.Points, tsdb.NewPoint( + name, tags, map[string]interface{}{"value": value}, timestamp, + )) +} + +// WriteShardRequest represents the a request to write a slice of points to a shard +type WriteShardRequest struct { + pb internal.WriteShardRequest +} + +// WriteShardResponse represents the response returned from a remote WriteShardRequest call +type WriteShardResponse struct { + pb internal.WriteShardResponse +} + +func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id } +func (w *WriteShardRequest) ShardID() uint64 { return w.pb.GetShardID() } + +func (w *WriteShardRequest) Points() []tsdb.Point { return w.unmarshalPoints() } + +func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + w.AddPoints([]tsdb.Point{tsdb.NewPoint( + name, tags, map[string]interface{}{"value": value}, timestamp, + )}) +} + +func (w *WriteShardRequest) AddPoints(points []tsdb.Point) { + for _, p := range points { + w.pb.Points = append(w.pb.Points, []byte(p.String())) + } +} + +// MarshalBinary encodes the object to a binary format. +func (w *WriteShardRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(&w.pb) +} + +// UnmarshalBinary populates WritePointRequest from a binary format. 
+func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &w.pb); err != nil { + return err + } + return nil +} + +func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point { + points := make([]tsdb.Point, len(w.pb.GetPoints())) + for i, p := range w.pb.GetPoints() { + pt, err := tsdb.ParsePoints(p) + if err != nil { + // A error here means that one node parsed the point correctly but sent an + // unparseable version to another node. We could log and drop the point and allow + // anti-entropy to resolve the discrepancy but this shouldn't ever happen. + panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err)) + } + points[i] = pt[0] + } + return points +} + +func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } +func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message } + +func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) } +func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() } + +// MarshalBinary encodes the object to a binary format. +func (w *WriteShardResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(&w.pb) +} + +// UnmarshalBinary populates WritePointRequest from a binary format. 
+func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &w.pb); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go new file mode 100644 index 000000000..4e42cd5d6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go @@ -0,0 +1,110 @@ +package cluster + +import ( + "testing" + "time" +) + +func TestWriteShardRequestBinary(t *testing.T) { + sr := &WriteShardRequest{} + + sr.SetShardID(uint64(1)) + if exp := uint64(1); sr.ShardID() != exp { + t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp) + } + + sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"}) + sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) + sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + b, err := sr.MarshalBinary() + if err != nil { + t.Fatalf("WritePointsRequest.MarshalBinary() failed: %v", err) + } + if len(b) == 0 { + t.Fatalf("WritePointsRequest.MarshalBinary() returned 0 bytes") + } + + got := &WriteShardRequest{} + if err := got.UnmarshalBinary(b); err != nil { + t.Fatalf("WritePointsRequest.UnmarshalMarshalBinary() failed: %v", err) + } + + if got.ShardID() != sr.ShardID() { + t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID()) + } + + if len(got.Points()) != len(sr.Points()) { + t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points())) + } + + srPoints := sr.Points() + gotPoints := got.Points() + for i, p := range srPoints { + g := gotPoints[i] + + if g.Name() != p.Name() { + t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name()) + } + + if !g.Time().Equal(p.Time()) { + t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time()) + } + + if g.HashID() != p.HashID() { + t.Errorf("Point #%d HashID() 
mismatch: got %v, exp %v", i, g.HashID(), p.HashID()) + } + + for k, v := range p.Tags() { + if g.Tags()[k] != v { + t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v) + } + } + + if len(p.Fields()) != len(g.Fields()) { + t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields())) + } + + for j, f := range p.Fields() { + if g.Fields()[j] != f { + t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f) + } + } + } +} + +func TestWriteShardResponseBinary(t *testing.T) { + sr := &WriteShardResponse{} + sr.SetCode(10) + sr.SetMessage("foo") + b, err := sr.MarshalBinary() + + if exp := 10; sr.Code() != exp { + t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp) + } + + if exp := "foo"; sr.Message() != exp { + t.Fatalf("Message mismatch: got %v, exp %v", sr.Message(), exp) + } + + if err != nil { + t.Fatalf("WritePointsResponse.MarshalBinary() failed: %v", err) + } + if len(b) == 0 { + t.Fatalf("WritePointsResponse.MarshalBinary() returned 0 bytes") + } + + got := &WriteShardResponse{} + if err := got.UnmarshalBinary(b); err != nil { + t.Fatalf("WritePointsResponse.UnmarshalMarshalBinary() failed: %v", err) + } + + if got.Code() != sr.Code() { + t.Errorf("Code mismatch: got %v, exp %v", got.Code(), sr.Code()) + } + + if got.Message() != sr.Message() { + t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message()) + } + +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go new file mode 100644 index 000000000..325144929 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go @@ -0,0 +1,351 @@ +package cluster + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "sync" + + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" +) + +// MaxMessageSize 
defines how large a message can be before we reject it +const MaxMessageSize = 1024 * 1024 * 1024 // 1GB + +// MuxHeader is the header byte used in the TCP mux. +const MuxHeader = 2 + +// Service processes data received over raw TCP connections. +type Service struct { + mu sync.RWMutex + + wg sync.WaitGroup + closing chan struct{} + + Listener net.Listener + + MetaStore interface { + ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) + } + + TSDBStore interface { + CreateShard(database, policy string, shardID uint64) error + WriteToShard(shardID uint64, points []tsdb.Point) error + CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) + } + + Logger *log.Logger +} + +// NewService returns a new instance of Service. +func NewService(c Config) *Service { + return &Service{ + closing: make(chan struct{}), + Logger: log.New(os.Stderr, "[tcp] ", log.LstdFlags), + } +} + +// Open opens the network listener and begins serving requests. +func (s *Service) Open() error { + + s.Logger.Println("Starting cluster service") + // Begin serving conections. + s.wg.Add(1) + go s.serve() + + return nil +} + +// SetLogger sets the internal logger to the logger passed in. +func (s *Service) SetLogger(l *log.Logger) { + s.Logger = l +} + +// serve accepts connections from the listener and handles them. +func (s *Service) serve() { + defer s.wg.Done() + + for { + // Check if the service is shutting down. + select { + case <-s.closing: + return + default: + } + + // Accept the next connection. + conn, err := s.Listener.Accept() + if err != nil { + if strings.Contains(err.Error(), "connection closed") { + s.Logger.Printf("cluster service accept error: %s", err) + return + } + s.Logger.Printf("accept error: %s", err) + continue + } + + // Delegate connection handling to a separate goroutine. 
+ s.wg.Add(1) + go func() { + defer s.wg.Done() + s.handleConn(conn) + }() + } +} + +// Close shuts down the listener and waits for all connections to finish. +func (s *Service) Close() error { + if s.Listener != nil { + s.Listener.Close() + } + + // Shut down all handlers. + close(s.closing) + s.wg.Wait() + + return nil +} + +// handleConn services an individual TCP connection. +func (s *Service) handleConn(conn net.Conn) { + // Ensure connection is closed when service is closed. + closing := make(chan struct{}) + defer close(closing) + go func() { + select { + case <-closing: + case <-s.closing: + } + conn.Close() + }() + + s.Logger.Printf("accept remote write connection from %v\n", conn.RemoteAddr()) + defer func() { + s.Logger.Printf("close remote write connection from %v\n", conn.RemoteAddr()) + }() + for { + // Read type-length-value. + typ, buf, err := ReadTLV(conn) + if err != nil { + if strings.HasSuffix(err.Error(), "EOF") { + return + } + s.Logger.Printf("unable to read type-length-value %s", err) + return + } + + // Delegate message processing by type. 
+ switch typ { + case writeShardRequestMessage: + err := s.processWriteShardRequest(buf) + if err != nil { + s.Logger.Printf("process write shard error: %s", err) + } + s.writeShardResponse(conn, err) + case mapShardRequestMessage: + err := s.processMapShardRequest(conn, buf) + if err != nil { + s.Logger.Printf("process map shard error: %s", err) + if err := writeMapShardResponseMessage(conn, NewMapShardResponse(1, err.Error())); err != nil { + s.Logger.Printf("process map shard error writing response: %s", err.Error()) + } + } + default: + s.Logger.Printf("cluster service message type not found: %d", typ) + } + } +} + +func (s *Service) processWriteShardRequest(buf []byte) error { + // Build request + var req WriteShardRequest + if err := req.UnmarshalBinary(buf); err != nil { + return err + } + + err := s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) + + // We may have received a write for a shard that we don't have locally because the + // sending node may have just created the shard (via the metastore) and the write + // arrived before the local store could create the shard. In this case, we need + // to check the metastore to determine what database and retention policy this + // shard should reside within. + if err == tsdb.ErrShardNotFound { + + // Query the metastore for the owner of this shard + database, retentionPolicy, sgi := s.MetaStore.ShardOwner(req.ShardID()) + if sgi == nil { + // If we can't find it, then we need to drop this request + // as it is no longer valid. This could happen if writes were queued via + // hinted handoff and delivered after a shard group was deleted. + s.Logger.Printf("drop write request: shard=%d. 
shard group does not exist or was deleted", req.ShardID()) + return nil + } + + err = s.TSDBStore.CreateShard(database, retentionPolicy, req.ShardID()) + if err != nil { + return err + } + return s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) + } + + if err != nil { + return fmt.Errorf("write shard %d: %s", req.ShardID(), err) + } + + return nil +} + +func (s *Service) writeShardResponse(w io.Writer, e error) { + // Build response. + var resp WriteShardResponse + if e != nil { + resp.SetCode(1) + resp.SetMessage(e.Error()) + } else { + resp.SetCode(0) + } + + // Marshal response to binary. + buf, err := resp.MarshalBinary() + if err != nil { + s.Logger.Printf("error marshalling shard response: %s", err) + return + } + + // Write to connection. + if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil { + s.Logger.Printf("write shard response error: %s", err) + } +} + +func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error { + // Decode request + var req MapShardRequest + if err := req.UnmarshalBinary(buf); err != nil { + return err + } + + // Parse the statement. 
+ q, err := influxql.ParseQuery(req.Query()) + if err != nil { + return fmt.Errorf("processing map shard: %s", err) + } else if len(q.Statements) != 1 { + return fmt.Errorf("processing map shard: expected 1 statement but got %d", len(q.Statements)) + } + + m, err := s.TSDBStore.CreateMapper(req.ShardID(), q.Statements[0], int(req.ChunkSize())) + if err != nil { + return fmt.Errorf("create mapper: %s", err) + } + if m == nil { + return writeMapShardResponseMessage(w, NewMapShardResponse(0, "")) + } + + if err := m.Open(); err != nil { + return fmt.Errorf("mapper open: %s", err) + } + defer m.Close() + + var metaSent bool + for { + var resp MapShardResponse + + if !metaSent { + resp.SetTagSets(m.TagSets()) + resp.SetFields(m.Fields()) + metaSent = true + } + + chunk, err := m.NextChunk() + if err != nil { + return fmt.Errorf("next chunk: %s", err) + } + + // NOTE: Even if the chunk is nil, we still need to send one + // empty response to let the other side know we're out of data. + + if chunk != nil { + b, err := json.Marshal(chunk) + if err != nil { + return fmt.Errorf("encoding: %s", err) + } + resp.SetData(b) + } + + // Write to connection. + resp.SetCode(0) + if err := writeMapShardResponseMessage(w, &resp); err != nil { + return err + } + + if chunk == nil { + // All mapper data sent. + return nil + } + } +} + +func writeMapShardResponseMessage(w io.Writer, msg *MapShardResponse) error { + buf, err := msg.MarshalBinary() + if err != nil { + return err + } + return WriteTLV(w, mapShardResponseMessage, buf) +} + +// ReadTLV reads a type-length-value record from r. +func ReadTLV(r io.Reader) (byte, []byte, error) { + var typ [1]byte + if _, err := io.ReadFull(r, typ[:]); err != nil { + return 0, nil, fmt.Errorf("read message type: %s", err) + } + + // Read the size of the message. 
+ var sz int64 + if err := binary.Read(r, binary.BigEndian, &sz); err != nil { + return 0, nil, fmt.Errorf("read message size: %s", err) + } + + if sz == 0 { + return 0, nil, fmt.Errorf("invalid message size: %d", sz) + } + + if sz >= MaxMessageSize { + return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) + } + + // Read the value. + buf := make([]byte, sz) + if _, err := io.ReadFull(r, buf); err != nil { + return 0, nil, fmt.Errorf("read message value: %s", err) + } + + return typ[0], buf, nil +} + +// WriteTLV writes a type-length-value record to w. +func WriteTLV(w io.Writer, typ byte, buf []byte) error { + if _, err := w.Write([]byte{typ}); err != nil { + return fmt.Errorf("write message type: %s", err) + } + + // Write the size of the message. + if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { + return fmt.Errorf("write message size: %s", err) + } + + // Write the value. + if _, err := w.Write(buf); err != nil { + return fmt.Errorf("write message value: %s", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go new file mode 100644 index 000000000..3006bc4fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go @@ -0,0 +1,104 @@ +package cluster_test + +import ( + "fmt" + "net" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tcp" + "github.com/influxdb/influxdb/tsdb" +) + +type metaStore struct { + host string +} + +func (m *metaStore) Node(nodeID uint64) (*meta.NodeInfo, error) { + return &meta.NodeInfo{ + ID: nodeID, + Host: m.host, + }, nil +} + +type testService struct { + nodeID uint64 + ln net.Listener + muxln net.Listener + writeShardFunc func(shardID uint64, points []tsdb.Point) error + createShardFunc 
func(database, policy string, shardID uint64) error + createMapperFunc func(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) +} + +func newTestWriteService(f func(shardID uint64, points []tsdb.Point) error) testService { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(err) + } + + mux := tcp.NewMux() + muxln := mux.Listen(cluster.MuxHeader) + go mux.Serve(ln) + + return testService{ + writeShardFunc: f, + ln: ln, + muxln: muxln, + } +} + +func (ts *testService) Close() { + if ts.ln != nil { + ts.ln.Close() + } +} + +type serviceResponses []serviceResponse +type serviceResponse struct { + shardID uint64 + ownerID uint64 + points []tsdb.Point +} + +func (t testService) WriteToShard(shardID uint64, points []tsdb.Point) error { + return t.writeShardFunc(shardID, points) +} + +func (t testService) CreateShard(database, policy string, shardID uint64) error { + return t.createShardFunc(database, policy, shardID) +} + +func (t testService) CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { + return t.createMapperFunc(shardID, stmt, chunkSize) +} + +func writeShardSuccess(shardID uint64, points []tsdb.Point) error { + responses <- &serviceResponse{ + shardID: shardID, + points: points, + } + return nil +} + +func writeShardFail(shardID uint64, points []tsdb.Point) error { + return fmt.Errorf("failed to write") +} + +var responses = make(chan *serviceResponse, 1024) + +func (testService) ResponseN(n int) ([]*serviceResponse, error) { + var a []*serviceResponse + for { + select { + case r := <-responses: + a = append(a, r) + if len(a) == n { + return a, nil + } + case <-time.After(time.Second): + return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a)) + } + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go new file mode 100644 index 
000000000..88000bde7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go @@ -0,0 +1,196 @@ +package cluster + +import ( + "fmt" + "math/rand" + "net" + "time" + + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" +) + +// ShardMapper is responsible for providing mappers for requested shards. It is +// responsible for creating those mappers from the local store, or reaching +// out to another node on the cluster. +type ShardMapper struct { + ForceRemoteMapping bool // All shards treated as remote. Useful for testing. + + MetaStore interface { + NodeID() uint64 + Node(id uint64) (ni *meta.NodeInfo, err error) + } + + TSDBStore interface { + CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) + } + + timeout time.Duration + pool *clientPool +} + +// NewShardMapper returns a mapper of local and remote shards. +func NewShardMapper(timeout time.Duration) *ShardMapper { + return &ShardMapper{ + pool: newClientPool(), + timeout: timeout, + } +} + +// CreateMapper returns a Mapper for the given shard ID. +func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { + m, err := s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize) + if err != nil { + return nil, err + } + + if !sh.OwnedBy(s.MetaStore.NodeID()) || s.ForceRemoteMapping { + // Pick a node in a pseudo-random manner. 
+ conn, err := s.dial(sh.Owners[rand.Intn(len(sh.Owners))].NodeID) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(s.timeout)) + + m.SetRemote(NewRemoteMapper(conn, sh.ID, stmt, chunkSize)) + } + + return m, nil +} + +func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) { + ni, err := s.MetaStore.Node(nodeID) + if err != nil { + return nil, err + } + conn, err := net.Dial("tcp", ni.Host) + if err != nil { + return nil, err + } + + // Write the cluster multiplexing header byte + conn.Write([]byte{MuxHeader}) + + return conn, nil +} + +// RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node, +// sends a query, and interprets the stream of data that comes back. +type RemoteMapper struct { + shardID uint64 + stmt influxql.Statement + chunkSize int + + tagsets []string + fields []string + + conn net.Conn + bufferedResponse *MapShardResponse +} + +// NewRemoteMapper returns a new remote mapper using the given connection. +func NewRemoteMapper(c net.Conn, shardID uint64, stmt influxql.Statement, chunkSize int) *RemoteMapper { + return &RemoteMapper{ + conn: c, + shardID: shardID, + stmt: stmt, + chunkSize: chunkSize, + } +} + +// Open connects to the remote node and starts receiving data. +func (r *RemoteMapper) Open() (err error) { + defer func() { + if err != nil { + r.conn.Close() + } + }() + // Build Map request. + var request MapShardRequest + request.SetShardID(r.shardID) + request.SetQuery(r.stmt.String()) + request.SetChunkSize(int32(r.chunkSize)) + + // Marshal into protocol buffers. + buf, err := request.MarshalBinary() + if err != nil { + return err + } + + // Write request. + if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil { + return err + } + + // Read the response. + _, buf, err = ReadTLV(r.conn) + if err != nil { + return err + } + + // Unmarshal response. 
+ r.bufferedResponse = &MapShardResponse{} + if err := r.bufferedResponse.UnmarshalBinary(buf); err != nil { + return err + } + + if r.bufferedResponse.Code() != 0 { + return fmt.Errorf("error code %d: %s", r.bufferedResponse.Code(), r.bufferedResponse.Message()) + } + + // Decode the first response to get the TagSets. + r.tagsets = r.bufferedResponse.TagSets() + r.fields = r.bufferedResponse.Fields() + + return nil +} + +func (r *RemoteMapper) SetRemote(m tsdb.Mapper) error { + return fmt.Errorf("cannot set remote mapper on a remote mapper") +} + +func (r *RemoteMapper) TagSets() []string { + return r.tagsets +} + +func (r *RemoteMapper) Fields() []string { + return r.fields +} + +// NextChunk returns the next chunk read from the remote node to the client. +func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) { + var response *MapShardResponse + if r.bufferedResponse != nil { + response = r.bufferedResponse + r.bufferedResponse = nil + } else { + response = &MapShardResponse{} + + // Read the response. + _, buf, err := ReadTLV(r.conn) + if err != nil { + return nil, err + } + + // Unmarshal response. 
+ if err := response.UnmarshalBinary(buf); err != nil { + return nil, err + } + + if response.Code() != 0 { + return nil, fmt.Errorf("error code %d: %s", response.Code(), response.Message()) + } + } + + if response.Data() == nil { + return nil, nil + } + + return response.Data(), err +} + +// Close the Mapper +func (r *RemoteMapper) Close() { + r.conn.Close() +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go new file mode 100644 index 000000000..955735299 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go @@ -0,0 +1,114 @@ +package cluster + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "testing" + + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/tsdb" +) + +// remoteShardResponder implements the remoteShardConn interface. +type remoteShardResponder struct { + net.Conn + t *testing.T + rxBytes []byte + + buffer *bytes.Buffer +} + +func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *remoteShardResponder { + r := &remoteShardResponder{} + a := make([]byte, 0, 1024) + r.buffer = bytes.NewBuffer(a) + + // Pump the outputs in the buffer for later reading. + for _, o := range outputs { + resp := &MapShardResponse{} + resp.SetCode(0) + if o != nil { + d, _ := json.Marshal(o) + resp.SetData(d) + resp.SetTagSets(tagsets) + } + + g, _ := resp.MarshalBinary() + WriteTLV(r.buffer, mapShardResponseMessage, g) + } + + return r +} + +func (r remoteShardResponder) Close() error { return nil } +func (r remoteShardResponder) Read(p []byte) (n int, err error) { + return io.ReadFull(r.buffer, p) +} + +func (r remoteShardResponder) Write(p []byte) (n int, err error) { + if r.rxBytes == nil { + r.rxBytes = make([]byte, 0) + } + r.rxBytes = append(r.rxBytes, p...) 
+ return len(p), nil +} + +// Ensure a RemoteMapper can process valid responses from a remote shard. +func TestShardWriter_RemoteMapper_Success(t *testing.T) { + expTagSets := []string{"tagsetA"} + expOutput := &tsdb.MapperOutput{ + Name: "cpu", + Tags: map[string]string{"host": "serverA"}, + } + + c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets) + + r := NewRemoteMapper(c, 1234, mustParseStmt("SELECT * FROM CPU"), 10) + if err := r.Open(); err != nil { + t.Fatalf("failed to open remote mapper: %s", err.Error()) + } + + if r.TagSets()[0] != expTagSets[0] { + t.Fatalf("incorrect tagsets received, exp %v, got %v", expTagSets, r.TagSets()) + } + + // Get first chunk from mapper. + chunk, err := r.NextChunk() + if err != nil { + t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) + } + b, ok := chunk.([]byte) + if !ok { + t.Fatal("chunk is not of expected type") + } + output := &tsdb.MapperOutput{} + if err := json.Unmarshal(b, output); err != nil { + t.Fatal(err) + } + if output.Name != "cpu" { + t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output) + } + + // Next chunk should be nil, indicating no more data. + chunk, err = r.NextChunk() + if err != nil { + t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) + } + if chunk != nil { + t.Fatal("received more chunks when none expected") + } +} + +// mustParseStmt parses a single statement or panics. 
+func mustParseStmt(stmt string) influxql.Statement { + q, err := influxql.ParseQuery(stmt) + if err != nil { + panic(err) + } else if len(q.Statements) != 1 { + panic(fmt.Sprintf("expected 1 statement but got %d", len(q.Statements))) + } + return q.Statements[0] +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go new file mode 100644 index 000000000..a0f317be9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go @@ -0,0 +1,163 @@ +package cluster + +import ( + "fmt" + "net" + "time" + + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" + "gopkg.in/fatih/pool.v2" +) + +const ( + writeShardRequestMessage byte = iota + 1 + writeShardResponseMessage + mapShardRequestMessage + mapShardResponseMessage +) + +// ShardWriter writes a set of points to a shard. +type ShardWriter struct { + pool *clientPool + timeout time.Duration + + MetaStore interface { + Node(id uint64) (ni *meta.NodeInfo, err error) + } +} + +// NewShardWriter returns a new instance of ShardWriter. +func NewShardWriter(timeout time.Duration) *ShardWriter { + return &ShardWriter{ + pool: newClientPool(), + timeout: timeout, + } +} + +func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []tsdb.Point) error { + c, err := w.dial(ownerID) + if err != nil { + return err + } + + conn, ok := c.(*pool.PoolConn) + if !ok { + panic("wrong connection type") + } + defer func(conn net.Conn) { + conn.Close() // return to pool + }(conn) + + // Build write request. + var request WriteShardRequest + request.SetShardID(shardID) + request.AddPoints(points) + + // Marshal into protocol buffers. + buf, err := request.MarshalBinary() + if err != nil { + return err + } + + // Write request. 
+ conn.SetWriteDeadline(time.Now().Add(w.timeout)) + if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil { + conn.MarkUnusable() + return err + } + + // Read the response. + conn.SetReadDeadline(time.Now().Add(w.timeout)) + _, buf, err = ReadTLV(conn) + if err != nil { + conn.MarkUnusable() + return err + } + + // Unmarshal response. + var response WriteShardResponse + if err := response.UnmarshalBinary(buf); err != nil { + return err + } + + if response.Code() != 0 { + return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) + } + + return nil +} + +func (c *ShardWriter) dial(nodeID uint64) (net.Conn, error) { + // If we don't have a connection pool for that addr yet, create one + _, ok := c.pool.getPool(nodeID) + if !ok { + factory := &connFactory{nodeID: nodeID, clientPool: c.pool, timeout: c.timeout} + factory.metaStore = c.MetaStore + + p, err := pool.NewChannelPool(1, 3, factory.dial) + if err != nil { + return nil, err + } + c.pool.setPool(nodeID, p) + } + return c.pool.conn(nodeID) +} + +func (w *ShardWriter) Close() error { + if w.pool == nil { + return fmt.Errorf("client already closed") + } + w.pool.close() + w.pool = nil + return nil +} + +const ( + maxConnections = 500 + maxRetries = 3 +) + +var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections) + +type connFactory struct { + nodeID uint64 + timeout time.Duration + + clientPool interface { + size() int + } + + metaStore interface { + Node(id uint64) (ni *meta.NodeInfo, err error) + } +} + +func (c *connFactory) dial() (net.Conn, error) { + if c.clientPool.size() > maxConnections { + return nil, errMaxConnectionsExceeded + } + + ni, err := c.metaStore.Node(c.nodeID) + if err != nil { + return nil, err + } + + if ni == nil { + return nil, fmt.Errorf("node %d does not exist", c.nodeID) + } + + conn, err := net.DialTimeout("tcp", ni.Host, c.timeout) + if err != nil { + return nil, err + } + + // Write a marker byte for cluster 
messages. + _, err = conn.Write([]byte{MuxHeader}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go new file mode 100644 index 000000000..d994315ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go @@ -0,0 +1,186 @@ +package cluster_test + +import ( + "net" + "strings" + "testing" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/tsdb" +) + +// Ensure the shard writer can successful write a single request. +func TestShardWriter_WriteShard_Success(t *testing.T) { + ts := newTestWriteService(writeShardSuccess) + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = ts + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute) + w.MetaStore = &metaStore{host: ts.ln.Addr().String()} + + // Build a single point. + now := time.Now() + var points []tsdb.Point + points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) + + // Write to shard and close. + if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Validate response. + responses, err := ts.ResponseN(1) + if err != nil { + t.Fatal(err) + } else if responses[0].shardID != 1 { + t.Fatalf("unexpected shard id: %d", responses[0].shardID) + } + + // Validate point. 
+ if p := responses[0].points[0]; p.Name() != "cpu" { + t.Fatalf("unexpected name: %s", p.Name()) + } else if p.Fields()["value"] != int64(100) { + t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) + } else if p.Tags()["host"] != "server01" { + t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) + } else if p.Time().UnixNano() != now.UnixNano() { + t.Fatalf("unexpected time: %s", p.Time()) + } +} + +// Ensure the shard writer can successful write a multiple requests. +func TestShardWriter_WriteShard_Multiple(t *testing.T) { + ts := newTestWriteService(writeShardSuccess) + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = ts + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute) + w.MetaStore = &metaStore{host: ts.ln.Addr().String()} + + // Build a single point. + now := time.Now() + var points []tsdb.Point + points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) + + // Write to shard twice and close. + if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Validate response. + responses, err := ts.ResponseN(1) + if err != nil { + t.Fatal(err) + } else if responses[0].shardID != 1 { + t.Fatalf("unexpected shard id: %d", responses[0].shardID) + } + + // Validate point. 
+ if p := responses[0].points[0]; p.Name() != "cpu" { + t.Fatalf("unexpected name: %s", p.Name()) + } else if p.Fields()["value"] != int64(100) { + t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) + } else if p.Tags()["host"] != "server01" { + t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) + } else if p.Time().UnixNano() != now.UnixNano() { + t.Fatalf("unexpected time: %s", p.Time()) + } +} + +// Ensure the shard writer returns an error when the server fails to accept the write. +func TestShardWriter_WriteShard_Error(t *testing.T) { + ts := newTestWriteService(writeShardFail) + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = ts + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute) + w.MetaStore = &metaStore{host: ts.ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []tsdb.Point + points = append(points, tsdb.NewPoint( + "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" { + t.Fatalf("unexpected error: %v", err) + } +} + +// Ensure the shard writer returns an error when dialing times out. 
+func TestShardWriter_Write_ErrDialTimeout(t *testing.T) { + ts := newTestWriteService(writeShardSuccess) + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = ts + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Nanosecond) + w.MetaStore = &metaStore{host: ts.ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []tsdb.Point + points = append(points, tsdb.NewPoint( + "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) { + t.Fatalf("expected error %v, to contain %s", err, exp) + } +} + +// Ensure the shard writer returns an error when reading times out. +func TestShardWriter_Write_ErrReadTimeout(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + w := cluster.NewShardWriter(time.Millisecond) + w.MetaStore = &metaStore{host: ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []tsdb.Point + points = append(points, tsdb.NewPoint( + "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") { + t.Fatalf("unexpected error: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go new file mode 100644 index 000000000..06064eced --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go @@ -0,0 +1,779 @@ +package main + +import ( + "encoding/csv" + "encoding/json" + "flag" + "fmt" + "io" + "net" + "net/url" + "os" + "os/user" + "path/filepath" + "sort" + "strconv" + 
"strings" + "text/tabwriter" + + "github.com/influxdb/influxdb/client" + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/importer/v8" + "github.com/peterh/liner" +) + +// These variables are populated via the Go linker. +var ( + version string = "0.9" +) + +const ( + // defaultFormat is the default format of the results when issuing queries + defaultFormat = "column" + + // defaultPrecision is the default timestamp format of the results when issuing queries + defaultPrecision = "ns" + + // defaultPPS is the default points per second that the import will throttle at + // by default it's 0, which means it will not throttle + defaultPPS = 0 +) + +type CommandLine struct { + Client *client.Client + Line *liner.State + Host string + Port int + Username string + Password string + Database string + Ssl bool + RetentionPolicy string + Version string + Pretty bool // controls pretty print for json + Format string // controls the output format. Valid values are json, csv, or column + Precision string + WriteConsistency string + Execute string + ShowVersion bool + Import bool + PPS int // Controls how many points per second the import will allow via throttling + Path string + Compressed bool +} + +func main() { + c := CommandLine{} + + fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) + fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") + fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") + fs.StringVar(&c.Username, "username", c.Username, "Username to connect to the server.") + fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. 
Leaving blank will prompt for password (--password="").`) + fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") + fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") + fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") + fs.StringVar(&c.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") + fs.StringVar(&c.WriteConsistency, "consistency", "any", "Set write consistency level: any, one, quorum, or all.") + fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") + fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") + fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") + fs.BoolVar(&c.Import, "import", false, "Import a previous database.") + fs.IntVar(&c.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") + fs.StringVar(&c.Path, "path", "", "path to the file to import") + fs.BoolVar(&c.Compressed, "compressed", false, "set to true if the import file is compressed") + + // Define our own custom usage to print + fs.Usage = func() { + fmt.Println(`Usage of influx: + -version + Display the version and exit. + -host 'host name' + Host to connect to. + -port 'port #' + Port to connect to. + -database 'database name' + Database to connect to the server. + -password 'password' + Password to connect to the server. Leaving blank will prompt for password (--password ''). + -username 'username' + Username to connect to the server. + -ssl + Use https for requests. + -execute 'command' + Execute command and quit. + -format 'json|csv|column' + Format specifies the format of the server responses: json, csv, or column. 
+ -precision 'rfc3339|h|m|s|ms|u|ns' + Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. + -consistency 'any|one|quorum|all' + Set write consistency level: any, one, quorum, or all + -pretty + Turns on pretty print for the json format. + -import + Import a previous database export from file + -pps + How many points per second the import will allow. By default it is zero and will not throttle importing. + -path + Path to file to import + -compressed + Set to true if the import file is compressed + +Examples: + + # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: + $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty + + # Connect to a specific database on startup and set database context: + $ influx -database 'metrics' -host 'localhost' -port '8086' +`) + } + fs.Parse(os.Args[1:]) + + if c.ShowVersion { + showVersion() + os.Exit(0) + } + + var promptForPassword bool + // determine if they set the password flag but provided no value + for _, v := range os.Args { + v = strings.ToLower(v) + if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" { + promptForPassword = true + break + } + } + + c.Line = liner.NewLiner() + defer c.Line.Close() + + if promptForPassword { + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Println("Unable to parse password.") + } else { + c.Password = p + } + } + + if err := c.connect(""); err != nil { + + } + if c.Execute == "" && !c.Import { + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version) + } + + if c.Execute != "" { + // Modify precision before executing query + c.SetPrecision(c.Precision) + if err := c.ExecuteQuery(c.Execute); err != nil { + c.Line.Close() + os.Exit(1) + } + c.Line.Close() + os.Exit(0) + } + + if c.Import { + path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + u, e := client.ParseConnectionString(path, c.Ssl) + if e != nil { + 
fmt.Println(e) + return + } + + config := v8.NewConfig() + config.Username = c.Username + config.Password = c.Password + config.Precision = "ns" + config.WriteConsistency = "any" + config.Path = c.Path + config.Version = version + config.URL = u + config.Compressed = c.Compressed + config.PPS = c.PPS + config.Precision = c.Precision + + i := v8.NewImporter(config) + if err := i.Import(); err != nil { + fmt.Printf("ERROR: %s\n", err) + c.Line.Close() + os.Exit(1) + } + c.Line.Close() + os.Exit(0) + } + + showVersion() + + var historyFile string + usr, err := user.Current() + // Only load history if we can get the user + if err == nil { + historyFile = filepath.Join(usr.HomeDir, ".influx_history") + + if f, err := os.Open(historyFile); err == nil { + c.Line.ReadHistory(f) + f.Close() + } + } + + for { + l, e := c.Line.Prompt("> ") + if e != nil { + break + } + if c.ParseCommand(l) { + // write out the history + if len(historyFile) > 0 { + c.Line.AppendHistory(l) + if f, err := os.Create(historyFile); err == nil { + c.Line.WriteHistory(f) + f.Close() + } + } + } else { + break // exit main loop + } + } +} + +func showVersion() { + fmt.Println("InfluxDB shell " + version) +} + +func (c *CommandLine) ParseCommand(cmd string) bool { + lcmd := strings.TrimSpace(strings.ToLower(cmd)) + switch { + case strings.HasPrefix(lcmd, "exit"): + // signal the program to exit + return false + case strings.HasPrefix(lcmd, "gopher"): + c.gopher() + case strings.HasPrefix(lcmd, "connect"): + c.connect(cmd) + case strings.HasPrefix(lcmd, "auth"): + c.SetAuth(cmd) + case strings.HasPrefix(lcmd, "help"): + c.help() + case strings.HasPrefix(lcmd, "format"): + c.SetFormat(cmd) + case strings.HasPrefix(lcmd, "precision"): + c.SetPrecision(cmd) + case strings.HasPrefix(lcmd, "consistency"): + c.SetWriteConsistency(cmd) + case strings.HasPrefix(lcmd, "settings"): + c.Settings() + case strings.HasPrefix(lcmd, "pretty"): + c.Pretty = !c.Pretty + if c.Pretty { + fmt.Println("Pretty print enabled") 
+ } else { + fmt.Println("Pretty print disabled") + } + case strings.HasPrefix(lcmd, "use"): + c.use(cmd) + case strings.HasPrefix(lcmd, "insert"): + c.Insert(cmd) + case lcmd == "": + break + default: + c.ExecuteQuery(cmd) + } + return true +} + +func (c *CommandLine) connect(cmd string) error { + var cl *client.Client + var u url.URL + + // Remove the "connect" keyword if it exists + path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) + + // If they didn't provide a connection string, use the current settings + if path == "" { + path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + } + + var e error + u, e = client.ParseConnectionString(path, c.Ssl) + if e != nil { + return e + } + + config := client.NewConfig() + config.URL = u + config.Username = c.Username + config.Password = c.Password + config.UserAgent = "InfluxDBShell/" + version + config.Precision = c.Precision + cl, err := client.NewClient(config) + if err != nil { + return fmt.Errorf("Could not create client %s", err) + } + c.Client = cl + if _, v, e := c.Client.Ping(); e != nil { + return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr()) + } else { + c.Version = v + } + return nil +} + +func (c *CommandLine) SetAuth(cmd string) { + // If they pass in the entire command, we should parse it + // auth + args := strings.Fields(cmd) + if len(args) == 3 { + args = args[1:] + } else { + args = []string{} + } + + if len(args) == 2 { + c.Username = args[0] + c.Password = args[1] + } else { + u, e := c.Line.Prompt("username: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.Username = strings.TrimSpace(u) + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.Password = p + } + + // Update the client as well + c.Client.SetAuth(c.Username, c.Password) +} + +func (c *CommandLine) use(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + if len(args) 
!= 2 { + fmt.Printf("Could not parse database name from %q.\n", cmd) + return + } + d := args[1] + c.Database = d + fmt.Printf("Using database %s\n", d) +} + +func (c *CommandLine) SetPrecision(cmd string) { + // Remove the "precision" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + switch cmd { + case "h", "m", "s", "ms", "u", "ns": + c.Precision = cmd + c.Client.SetPrecision(c.Precision) + case "rfc3339": + c.Precision = "" + c.Client.SetPrecision(c.Precision) + default: + fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd) + } +} + +func (c *CommandLine) SetFormat(cmd string) { + // Remove the "format" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + switch cmd { + case "json", "csv", "column": + c.Format = cmd + default: + fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd) + } +} + +func (c *CommandLine) SetWriteConsistency(cmd string) { + // Remove the "consistency" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + _, err := cluster.ParseConsistencyLevel(cmd) + if err != nil { + fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd) + return + } + c.WriteConsistency = cmd +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } + +// isLetter returns true if the rune is a letter. +func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } + +// isDigit returns true if the rune is a digit. +func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } + +// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer. 
+func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } + +// isIdentChar returns true if the rune can be used in an unquoted identifier. +func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } + +func parseUnquotedIdentifier(stmt string) (string, string) { + if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, fields[0]) + } + return "", stmt +} + +func parseDoubleQuotedIdentifier(stmt string) (string, string) { + escapeNext := false + fields := strings.FieldsFunc(stmt, func(ch rune) bool { + if ch == '\\' { + escapeNext = true + } else if ch == '"' { + if !escapeNext { + return true + } + escapeNext = false + } + return false + }) + if len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") + } + return "", stmt +} + +func parseNextIdentifier(stmt string) (ident, remainder string) { + if len(stmt) > 0 { + switch { + case isWhitespace(rune(stmt[0])): + return parseNextIdentifier(stmt[1:]) + case isIdentFirstChar(rune(stmt[0])): + return parseUnquotedIdentifier(stmt) + case stmt[0] == '"': + return parseDoubleQuotedIdentifier(stmt) + } + } + return "", stmt +} + +func (c *CommandLine) parseInto(stmt string) string { + ident, stmt := parseNextIdentifier(stmt) + if strings.HasPrefix(stmt, ".") { + c.Database = ident + fmt.Printf("Using database %s\n", c.Database) + ident, stmt = parseNextIdentifier(stmt[1:]) + } + if strings.HasPrefix(stmt, " ") { + c.RetentionPolicy = ident + fmt.Printf("Using retention policy %s\n", c.RetentionPolicy) + return stmt[1:] + } + return stmt +} + +func (c *CommandLine) Insert(stmt string) error { + i, point := parseNextIdentifier(stmt) + if !strings.EqualFold(i, "insert") { + fmt.Printf("ERR: found %s, expected INSERT\n", i) + return nil + } + if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { + point = c.parseInto(r) + } + _, err := c.Client.Write(client.BatchPoints{ + 
Points: []client.Point{ + client.Point{Raw: point}, + }, + Database: c.Database, + RetentionPolicy: c.RetentionPolicy, + Precision: "n", + WriteConsistency: c.WriteConsistency, + }) + if err != nil { + fmt.Printf("ERR: %s\n", err) + if c.Database == "" { + fmt.Println("Note: error may be due to not setting a database or retention policy.") + fmt.Println(`Please set a database with the command "use " or`) + fmt.Println("INSERT INTO . ") + } + return err + } + return nil +} + +func (c *CommandLine) ExecuteQuery(query string) error { + response, err := c.Client.Query(client.Query{Command: query, Database: c.Database}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return err + } + c.FormatResponse(response, os.Stdout) + if err := response.Error(); err != nil { + fmt.Printf("ERR: %s\n", response.Error()) + if c.Database == "" { + fmt.Println("Warning: It is possible this error is due to not setting a database.") + fmt.Println(`Please set a database with the command "use ".`) + } + return err + } + return nil +} + +func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { + switch c.Format { + case "json": + c.writeJSON(response, w) + case "csv": + c.writeCSV(response, w) + case "column": + c.writeColumns(response, w) + default: + fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) + } +} + +func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { + var data []byte + var err error + if c.Pretty { + data, err = json.MarshalIndent(response, "", " ") + } else { + data, err = json.Marshal(response) + } + if err != nil { + fmt.Fprintf(w, "Unable to parse json: %s\n", err) + return + } + fmt.Fprintln(w, string(data)) +} + +func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { + csvw := csv.NewWriter(w) + for _, result := range response.Results { + // Create a tabbed writer for each result as they won't always line up + rows := c.formatResults(result, "\t") + for _, r := range rows { + csvw.Write(strings.Split(r, 
"\t")) + } + csvw.Flush() + } +} + +func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { + for _, result := range response.Results { + // Create a tabbed writer for each result a they won't always line up + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + csv := c.formatResults(result, "\t") + for _, r := range csv { + fmt.Fprintln(w, r) + } + w.Flush() + } +} + +// formatResults will behave differently if you are formatting for columns or csv +func (c *CommandLine) formatResults(result client.Result, separator string) []string { + rows := []string{} + // Create a tabbed writer for each result a they won't always line up + for i, row := range result.Series { + // gather tags + tags := []string{} + for k, v := range row.Tags { + tags = append(tags, fmt.Sprintf("%s=%s", k, v)) + sort.Strings(tags) + } + + columnNames := []string{} + + // Only put name/tags in a column if format is csv + if c.Format == "csv" { + if len(tags) > 0 { + columnNames = append([]string{"tags"}, columnNames...) + } + + if row.Name != "" { + columnNames = append([]string{"name"}, columnNames...) 
+ } + } + + for _, column := range row.Columns { + columnNames = append(columnNames, column) + } + + // Output a line separator if we have more than one set or results and format is column + if i > 0 && c.Format == "column" { + rows = append(rows, "") + } + + // If we are column format, we break out the name/tag to seperate lines + if c.Format == "column" { + if row.Name != "" { + n := fmt.Sprintf("name: %s", row.Name) + rows = append(rows, n) + if len(tags) == 0 { + l := strings.Repeat("-", len(n)) + rows = append(rows, l) + } + } + if len(tags) > 0 { + t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) + rows = append(rows, t) + } + } + + rows = append(rows, strings.Join(columnNames, separator)) + + // if format is column, break tags to their own line/format + if c.Format == "column" && len(tags) > 0 { + lines := []string{} + for _, columnName := range columnNames { + lines = append(lines, strings.Repeat("-", len(columnName))) + } + rows = append(rows, strings.Join(lines, separator)) + } + + for _, v := range row.Values { + var values []string + if c.Format == "csv" { + if row.Name != "" { + values = append(values, row.Name) + } + if len(tags) > 0 { + values = append(values, strings.Join(tags, ",")) + } + } + + for _, vv := range v { + values = append(values, interfaceToString(vv)) + } + rows = append(rows, strings.Join(values, separator)) + } + // Outout a line separator if in column format + if c.Format == "column" { + rows = append(rows, "") + } + } + return rows +} + +func interfaceToString(v interface{}) string { + switch t := v.(type) { + case nil: + return "" + case bool: + return fmt.Sprintf("%v", v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: + return fmt.Sprintf("%d", t) + case float32, float64: + return fmt.Sprintf("%v", t) + default: + return fmt.Sprintf("%v", t) + } +} + +func (c *CommandLine) Settings() { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + if c.Port > 0 { + 
fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) + } else { + fmt.Fprintf(w, "Host\t%s\n", c.Host) + } + fmt.Fprintf(w, "Username\t%s\n", c.Username) + fmt.Fprintf(w, "Database\t%s\n", c.Database) + fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) + fmt.Fprintf(w, "Format\t%s\n", c.Format) + fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency) + fmt.Fprintln(w) + w.Flush() +} + +func (c *CommandLine) help() { + fmt.Println(`Usage: + connect connect to another node + auth prompt for username and password + pretty toggle pretty print + use set current databases + format set the output format: json, csv, or column + precision set the timestamp format: h,m,s,ms,u,ns + consistency set write consistency level: any, one, quorum, or all + settings output the current settings for the shell + exit quit the influx shell + + show databases show database names + show series show series information + show measurements show measurement information + show tag keys show tag key information + show tag values show tag value information + + a full list of influxql commands can be found at: + https://influxdb.com/docs/v0.9/query_language/spec.html +`) +} + +func (c *CommandLine) gopher() { + fmt.Println(` + .-::-::://:-::- .:/++/' + '://:-''/oo+//++o+/.://o- ./+: + .:-. '++- .o/ '+yydhy' o- + .:/. .h: :osoys .smMN- :/ + -/:.' s- /MMMymh. '/y/ s' + -+s:'''' d -mMMms// '-/o: + -/++/++/////:. o: '... s- :s. + :+-+s-' ':/' 's- /+ 'o: + '+-'o: /ydhsh. '//. '-o- o- + .y. o: .MMMdm+y ':+++:::/+:.' s: + .-h/ y- 'sdmds'h -+ydds:::-.' 'h. + .//-.d' o: '.' 'dsNMMMNh:.:++' :y + +y. 'd 's. .s:mddds: ++ o/ + 'N- odd 'o/. './o-s-' .---+++' o- + 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' + so' .h '''' ////s: '+. .s +y' + os/-.y' 's' 'y::+ +d' + '.:o/ -+:-:.' so.---.' + o' 'd-.''/s' + .s' :y.''.y + -s mo:::' + :: yh + // '''' /M' + o+ .s///:/. 'N: + :+ /: -s' ho + 's- -/s/:+/.+h' +h + ys' ':' '-. -d + oh .h + /o .s + s. .h + -y .d + m/ -h + +d /o + 'N- y: + h: m. + s- -d + o- s+ + +- 'm' + s/ oo--. 
+ y- /s ':+' + s' 'od--' .d: + -+ ':o: ':+-/+ + y- .:+- ' + //o- '.:+/. + .-:+/' ''-/+/. + ./:' ''.:o+/-' + .+o:/:/+-' ''.-+ooo/-' + o: -h///++////-. + /: .o/ + //+ 'y + ./sooy. + +`) +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go new file mode 100644 index 000000000..045fa319e --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go @@ -0,0 +1,219 @@ +package main_test + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdb/influxdb/client" + main "github.com/influxdb/influxdb/cmd/influx" +) + +func TestParseCommand_CommandsExist(t *testing.T) { + t.Parallel() + c := main.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "gopher"}, + {cmd: "connect"}, + {cmd: "help"}, + {cmd: "pretty"}, + {cmd: "use"}, + {cmd: ""}, // test that a blank command just returns + } + for _, test := range tests { + if !c.ParseCommand(test.cmd) { + t.Fatalf(`Command failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_TogglePretty(t *testing.T) { + t.Parallel() + c := main.CommandLine{} + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } + c.ParseCommand("pretty") + if !c.Pretty { + t.Fatalf(`Pretty should be true.`) + } + c.ParseCommand("pretty") + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } +} + +func TestParseCommand_Exit(t *testing.T) { + t.Parallel() + c := main.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "exit"}, + {cmd: " exit"}, + {cmd: "exit "}, + {cmd: "Exit "}, + } + + for _, test := range tests { + if c.ParseCommand(test.cmd) { + t.Fatalf(`Command "exit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Use(t *testing.T) { + t.Parallel() + c := main.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "use db"}, + {cmd: " use db"}, + {cmd: "use db "}, + {cmd: "use db;"}, + {cmd: "use 
db; "}, + {cmd: "Use db"}, + } + + for _, test := range tests { + if !c.ParseCommand(test.cmd) { + t.Fatalf(`Command "use" failed for %q.`, test.cmd) + } + + if c.Database != "db" { + t.Fatalf(`Command "use" changed database to %q. Expected db`, c.Database) + } + } +} + +func TestParseCommand_Consistency(t *testing.T) { + t.Parallel() + c := main.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "consistency one"}, + {cmd: " consistency one"}, + {cmd: "consistency one "}, + {cmd: "consistency one;"}, + {cmd: "consistency one; "}, + {cmd: "Consistency one"}, + } + + for _, test := range tests { + if !c.ParseCommand(test.cmd) { + t.Fatalf(`Command "consistency" failed for %q.`, test.cmd) + } + + if c.WriteConsistency != "one" { + t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.WriteConsistency) + } + } +} + +func TestParseCommand_Insert(t *testing.T) { + t.Parallel() + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + m := main.CommandLine{Client: c} + + tests := []struct { + cmd string + }{ + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, + {cmd: "insert"}, + {cmd: "Insert "}, + {cmd: "insert c"}, + {cmd: "insert int"}, + } + + for _, test := range tests { + if !m.ParseCommand(test.cmd) { + t.Fatalf(`Command "insert" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_InsertInto(t *testing.T) { + t.Parallel() + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + m := main.CommandLine{Client: c} + + tests := []struct { + cmd, db, rp string + }{ + { + cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test", + }, + { + cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test test", + }, + { + cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, + db: "d b", + rp: "test test", + }, + } + + for _, test := range tests { + if !m.ParseCommand(test.cmd) { + t.Fatalf(`Command "insert into" failed for %q.`, test.cmd) + } + if m.Database != test.db { + t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, m.Database) + } + if m.RetentionPolicy != test.rp { + t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, m.RetentionPolicy) + } + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go new file mode 100644 index 000000000..9292fb02d --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go @@ -0,0 +1,67 @@ +package main + +import ( + "flag" + "fmt" + "runtime" + "sort" + "time" + + "github.com/influxdb/influxdb/stress" +) + +var ( + batchSize = flag.Int("batchsize", 5000, "number of points per batch") + seriesCount = flag.Int("series", 100000, "number of unique series to create") + pointCount = flag.Int("points", 100, "number of points per series to create") + concurrency = flag.Int("concurrency", 10, "number of 
simultaneous writes to run") + batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches") + database = flag.String("database", "stress", "name of database") + address = flag.String("addr", "localhost:8086", "IP address and port of database (e.g., localhost:8086)") + precision = flag.String("precision", "n", "The precision that points in the database will be with") +) + +var ms runner.Measurements + +func init() { + flag.Var(&ms, "m", "comma-separated list of intervals to use between events") +} + +func main() { + flag.Parse() + runtime.GOMAXPROCS(runtime.NumCPU()) + + if len(ms) == 0 { + ms = append(ms, "cpu") + } + + cfg := &runner.Config{ + BatchSize: *batchSize, + Measurements: ms, + SeriesCount: *seriesCount, + PointCount: *pointCount, + Concurrency: *concurrency, + BatchInterval: *batchInterval, + Database: *database, + Address: *address, + Precision: *precision, + } + + totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg) + + sort.Sort(sort.Reverse(sort.Interface(responseTimes))) + + total := int64(0) + for _, t := range responseTimes { + total += int64(t.Value) + } + mean := total / int64(len(responseTimes)) + + fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds()) + fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests**batchSize) + fmt.Println("Average response time: ", time.Duration(mean)) + fmt.Println("Slowest response times:") + for _, r := range responseTimes[:100] { + fmt.Println(time.Duration(r.Value)) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go new file mode 100644 index 000000000..c88652f75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go @@ -0,0 +1,170 @@ +package backup + +import ( + 
"encoding/json" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "os" + + "github.com/influxdb/influxdb/services/snapshotter" + "github.com/influxdb/influxdb/snapshot" +) + +// Suffix is a suffix added to the backup while it's in-process. +const Suffix = ".pending" + +// Command represents the program execution for "influxd backup". +type Command struct { + // The logger passed to the ticker during execution. + Logger *log.Logger + + // Standard input/output, overridden for testing. + Stderr io.Writer +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + // Set up logger. + cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags) + cmd.Logger.Printf("influxdb backup") + + // Parse command line arguments. + host, path, err := cmd.parseFlags(args) + if err != nil { + return err + } + + // Retrieve snapshot from local file. + m, err := snapshot.ReadFileManifest(path) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("read file snapshot: %s", err) + } + + // Determine temporary path to download to. + tmppath := path + Suffix + + // Calculate path of next backup file. + // This uses the path if it doesn't exist. + // Otherwise it appends an autoincrementing number. + path, err = cmd.nextPath(path) + if err != nil { + return fmt.Errorf("next path: %s", err) + } + + // Retrieve snapshot. + if err := cmd.download(host, m, tmppath); err != nil { + return fmt.Errorf("download: %s", err) + } + + // Rename temporary file to final path. + if err := os.Rename(tmppath, path); err != nil { + return fmt.Errorf("rename: %s", err) + } + + // TODO: Check file integrity. + + // Notify user of completion. + cmd.Logger.Println("backup complete") + + return nil +} + +// parseFlags parses and validates the command line arguments. 
+func (cmd *Command) parseFlags(args []string) (host string, path string, err error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&host, "host", "localhost:8088", "") + fs.SetOutput(cmd.Stderr) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return "", "", err + } + + // Ensure that only one arg is specified. + if fs.NArg() == 0 { + return "", "", errors.New("snapshot path required") + } else if fs.NArg() != 1 { + return "", "", errors.New("only one snapshot path allowed") + } + path = fs.Arg(0) + + return host, path, nil +} + +// nextPath returns the next file to write to. +func (cmd *Command) nextPath(path string) (string, error) { + // Use base path if it doesn't exist. + if _, err := os.Stat(path); os.IsNotExist(err) { + return path, nil + } else if err != nil { + return "", err + } + + // Otherwise iterate through incremental files until one is available. + for i := 0; ; i++ { + s := fmt.Sprintf(path+".%d", i) + if _, err := os.Stat(s); os.IsNotExist(err) { + return s, nil + } else if err != nil { + return "", err + } + } +} + +// download downloads a snapshot from a host to a given path. +func (cmd *Command) download(host string, m *snapshot.Manifest, path string) error { + // Create local file to write to. + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("open temp file: %s", err) + } + defer f.Close() + + // Connect to snapshotter service. + conn, err := net.Dial("tcp", host) + if err != nil { + return err + } + defer conn.Close() + + // Send snapshotter marker byte. + if _, err := conn.Write([]byte{snapshotter.MuxHeader}); err != nil { + return fmt.Errorf("write snapshot header byte: %s", err) + } + + // Write the manifest we currently have. + if err := json.NewEncoder(conn).Encode(m); err != nil { + return fmt.Errorf("encode snapshot manifest: %s", err) + } + + // Read snapshot from the connection. 
+ if _, err := io.Copy(f, conn); err != nil { + return fmt.Errorf("copy snapshot to file: %s", err) + } + + // FIXME(benbjohnson): Verify integrity of snapshot. + + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stderr, `usage: influxd backup [flags] PATH + +backup downloads a snapshot of a data node and saves it to disk. + + -host + The host to connect to snapshot. + Defaults to 127.0.0.1:8088. +`) +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go new file mode 100644 index 000000000..15db96449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go @@ -0,0 +1,125 @@ +package backup_test + +/* +import ( + "bytes" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/influxdb/influxdb" + "github.com/influxdb/influxdb/cmd/influxd" +) + +// Ensure the backup can download from the server and save to disk. +func TestBackupCommand(t *testing.T) { + // Mock the backup endpoint. + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/data/snapshot" { + t.Fatalf("unexpected url path: %s", r.URL.Path) + } + + // Write a simple snapshot to the buffer. + sw := influxdb.NewSnapshotWriter() + sw.Snapshot = &influxdb.Snapshot{Files: []influxdb.SnapshotFile{ + {Name: "meta", Size: 5, Index: 10}, + }} + sw.FileWriters["meta"] = influxdb.NopWriteToCloser(bytes.NewBufferString("55555")) + if _, err := sw.WriteTo(w); err != nil { + t.Fatal(err) + } + })) + defer s.Close() + + // Create a temp path and remove incremental backups at the end. + path := tempfile() + defer os.Remove(path) + defer os.Remove(path + ".0") + defer os.Remove(path + ".1") + + // Execute the backup against the mock server. 
+ for i := 0; i < 3; i++ { + if err := NewBackupCommand().Run("-host", s.URL, path); err != nil { + t.Fatal(err) + } + } + + // Verify snapshot and two incremental snapshots were written. + if _, err := os.Stat(path); err != nil { + t.Fatalf("snapshot not found: %s", err) + } else if _, err = os.Stat(path + ".0"); err != nil { + t.Fatalf("incremental snapshot(0) not found: %s", err) + } else if _, err = os.Stat(path + ".1"); err != nil { + t.Fatalf("incremental snapshot(1) not found: %s", err) + } +} + +// Ensure the backup command returns an error if flags cannot be parsed. +func TestBackupCommand_ErrFlagParse(t *testing.T) { + cmd := NewBackupCommand() + if err := cmd.Run("-bad-flag"); err == nil || err.Error() != `flag provided but not defined: -bad-flag` { + t.Fatal(err) + } else if !strings.Contains(cmd.Stderr.String(), "usage") { + t.Fatal("usage message not displayed") + } +} + +// Ensure the backup command returns an error if the host cannot be parsed. +func TestBackupCommand_ErrInvalidHostURL(t *testing.T) { + if err := NewBackupCommand().Run("-host", "http://%f"); err == nil || err.Error() != `parse host url: parse http://%f: hexadecimal escape in host` { + t.Fatal(err) + } +} + +// Ensure the backup command returns an error if the output path is not specified. +func TestBackupCommand_ErrPathRequired(t *testing.T) { + if err := NewBackupCommand().Run("-host", "//localhost"); err == nil || err.Error() != `snapshot path required` { + t.Fatal(err) + } +} + +// Ensure the backup returns an error if it cannot connect to the server. +func TestBackupCommand_ErrConnectionRefused(t *testing.T) { + // Start and immediately stop a server so we have a dead port. + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + s.Close() + + // Execute the backup command. 
+ path := tempfile() + defer os.Remove(path) + if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || + !(strings.Contains(err.Error(), `connection refused`) || strings.Contains(err.Error(), `No connection could be made`)) { + t.Fatal(err) + } +} + +// Ensure the backup returns any non-200 status codes. +func TestBackupCommand_ErrServerError(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer s.Close() + + // Execute the backup command. + path := tempfile() + defer os.Remove(path) + if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || err.Error() != `download: snapshot error: status=500` { + t.Fatal(err) + } +} + +// BackupCommand is a test wrapper for main.BackupCommand. +type BackupCommand struct { + *main.BackupCommand + Stderr bytes.Buffer +} + +// NewBackupCommand returns a new instance of BackupCommand. +func NewBackupCommand() *BackupCommand { + cmd := &BackupCommand{BackupCommand: main.NewBackupCommand()} + cmd.BackupCommand.Stderr = &cmd.Stderr + return cmd +} +*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go new file mode 100644 index 000000000..3f6bbfb08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go @@ -0,0 +1,46 @@ +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Configure and start an InfluxDB server. 
+ +Usage: + + influxd [[command] [arguments]] + +The commands are: + + backup downloads a snapshot of a data node and saves it to disk + config display the default configuration + restore uses a snapshot of a data node to rebuild a cluster + run run node with existing configuration + version displays the InfluxDB version + +"run" is the default command. + +Use "influxd help [command]" for more information about a command. +` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go new file mode 100644 index 000000000..9748493a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go @@ -0,0 +1,200 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/influxdb/influxdb/cmd/influxd/backup" + "github.com/influxdb/influxdb/cmd/influxd/help" + "github.com/influxdb/influxdb/cmd/influxd/restore" + "github.com/influxdb/influxdb/cmd/influxd/run" +) + +// These variables are populated via the Go linker. +var ( + version string = "0.9" + commit string + branch string +) + +func init() { + // If commit or branch are not set, make that clear. + if commit == "" { + commit = "unknown" + } + if branch == "" { + branch = "unknown" + } +} + +func main() { + rand.Seed(time.Now().UnixNano()) + + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger *log.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain return a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: log.New(os.Stderr, "[run] ", log.LstdFlags), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. 
+func (m *Main) Run(args ...string) error { + name, args := ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "run": + cmd := run.NewCommand() + + // Tell the server the build details. + cmd.Version = version + cmd.Commit = commit + cmd.Branch = branch + + if err := cmd.Run(args...); err != nil { + return fmt.Errorf("run: %s", err) + } + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + m.Logger.Println("Listening for signals") + + // Block until one of the signals above is received + select { + case <-signalCh: + m.Logger.Println("Signal received, initializing clean shutdown...") + go func() { + cmd.Close() + }() + } + + // Block again until another signal is received, a shutdown timeout elapses, + // or the Command is gracefully closed + m.Logger.Println("Waiting for clean shutdown...") + select { + case <-signalCh: + m.Logger.Println("second signal received, initializing hard shutdown") + case <-time.After(time.Second * 30): + m.Logger.Println("time limit reached, initializing hard shutdown") + case <-cmd.Closed: + m.Logger.Println("server shutdown completed") + } + + // goodbye. 
+ + case "backup": + name := backup.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("backup: %s", err) + } + case "restore": + name := restore.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("restore: %s", err) + } + case "config": + if err := run.NewPrintConfigCommand().Run(args...); err != nil { + return fmt.Errorf("config: %s", err) + } + case "version": + if err := NewVersionCommand().Run(args...); err != nil { + return fmt.Errorf("version: %s", err) + } + case "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) + } + + return nil +} + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. + var name string + if len(args) > 0 && !strings.HasPrefix(args[0], "-") { + name = args[0] + } + + // Special case -h immediately following binary name + if len(args) > 0 && args[0] == "-h" { + name = "help" + } + + // If command is "help" and has an argument then rewrite args to use "-h". + if name == "help" && len(args) > 1 { + args[0], args[1] = args[1], "-h" + name = args[0] + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} + +// Command represents the command executed by "influxd version". +type VersionCommand struct { + Stdout io.Writer + Stderr io.Writer +} + +// NewVersionCommand return a new instance of VersionCommand. +func NewVersionCommand() *VersionCommand { + return &VersionCommand{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run prints the current version and commit info. +func (cmd *VersionCommand) Run(args ...string) error { + // Parse flags in case -h is specified. 
+ fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) } + if err := fs.Parse(args); err != nil { + return err + } + + // Print version info. + fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) + + return nil +} + +var versionUsage = ` +usage: version + + version displays the InfluxDB version, build branch and git commit hash +` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go new file mode 100644 index 000000000..5a95f8726 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go @@ -0,0 +1,250 @@ +package restore + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "net" + "os" + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/snapshot" + "github.com/influxdb/influxdb/tsdb" +) + +// Command represents the program execution for "influxd restore". +type Command struct { + Stdout io.Writer + Stderr io.Writer +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + config, path, err := cmd.parseFlags(args) + if err != nil { + return err + } + + return cmd.Restore(config, path) +} + +func (cmd *Command) Restore(config *Config, path string) error { + // Remove meta and data directories. + if err := os.RemoveAll(config.Meta.Dir); err != nil { + return fmt.Errorf("remove meta dir: %s", err) + } else if err := os.RemoveAll(config.Data.Dir); err != nil { + return fmt.Errorf("remove data dir: %s", err) + } + + // Open snapshot file and all incremental backups. 
+ mr, files, err := snapshot.OpenFileMultiReader(path) + if err != nil { + return fmt.Errorf("open multireader: %s", err) + } + defer closeAll(files) + + // Unpack files from archive. + if err := cmd.unpack(mr, config); err != nil { + return fmt.Errorf("unpack: %s", err) + } + + // Notify user of completion. + fmt.Fprintf(os.Stdout, "restore complete using %s", path) + return nil +} + +// parseFlags parses and validates the command line arguments. +func (cmd *Command) parseFlags(args []string) (*Config, string, error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + configPath := fs.String("config", "", "") + fs.SetOutput(cmd.Stderr) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return nil, "", err + } + + // Parse configuration file from disk. + if *configPath == "" { + return nil, "", fmt.Errorf("config required") + } + + // Parse config. + config := Config{ + Meta: meta.NewConfig(), + Data: tsdb.NewConfig(), + } + if _, err := toml.DecodeFile(*configPath, &config); err != nil { + return nil, "", err + } + + // Require output path. + path := fs.Arg(0) + if path == "" { + return nil, "", fmt.Errorf("snapshot path required") + } + + return &config, path, nil +} + +func closeAll(a []io.Closer) { + for _, c := range a { + _ = c.Close() + } +} + +// unpack expands the files in the snapshot archive into a directory. +func (cmd *Command) unpack(mr *snapshot.MultiReader, config *Config) error { + // Loop over files and extract. + for { + // Read entry header. + sf, err := mr.Next() + if err == io.EOF { + break + } else if err != nil { + return fmt.Errorf("next: entry=%s, err=%s", sf.Name, err) + } + + // Log progress. + fmt.Fprintf(os.Stdout, "unpacking: %s (%d bytes)\n", sf.Name, sf.Size) + + // Handle meta and tsdb files separately. 
+ switch sf.Name { + case "meta": + if err := cmd.unpackMeta(mr, sf, config); err != nil { + return fmt.Errorf("meta: %s", err) + } + default: + if err := cmd.unpackData(mr, sf, config); err != nil { + return fmt.Errorf("data: %s", err) + } + } + } + + return nil +} + +// unpackMeta reads the metadata from the snapshot and initializes a raft +// cluster and replaces the root metadata. +func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { + // Read meta into buffer. + var buf bytes.Buffer + if _, err := io.CopyN(&buf, mr, sf.Size); err != nil { + return fmt.Errorf("copy: %s", err) + } + + // Unpack into metadata. + var data meta.Data + if err := data.UnmarshalBinary(buf.Bytes()); err != nil { + return fmt.Errorf("unmarshal: %s", err) + } + + // Copy meta config and remove peers so it starts in single mode. + c := config.Meta + c.Peers = nil + + // Initialize meta store. + store := meta.NewStore(config.Meta) + store.RaftListener = newNopListener() + store.ExecListener = newNopListener() + + // Determine advertised address. + _, port, err := net.SplitHostPort(config.Meta.BindAddress) + if err != nil { + return fmt.Errorf("split bind address: %s", err) + } + hostport := net.JoinHostPort(config.Meta.Hostname, port) + + // Resolve address. + addr, err := net.ResolveTCPAddr("tcp", hostport) + if err != nil { + return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) + } + store.Addr = addr + + // Open the meta store. + if err := store.Open(); err != nil { + return fmt.Errorf("open store: %s", err) + } + defer store.Close() + + // Wait for the store to be ready or error. + select { + case <-store.Ready(): + case err := <-store.Err(): + return err + } + + // Force set the full metadata. 
+ if err := store.SetData(&data); err != nil { + return fmt.Errorf("set data: %s", err) + } + + return nil +} + +func (cmd *Command) unpackData(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { + path := filepath.Join(config.Data.Dir, sf.Name) + // Create parent directory for output file. + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return fmt.Errorf("mkdir: entry=%s, err=%s", sf.Name, err) + } + + // Create output file. + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("create: entry=%s, err=%s", sf.Name, err) + } + defer f.Close() + + // Copy contents from reader. + if _, err := io.CopyN(f, mr, sf.Size); err != nil { + return fmt.Errorf("copy: entry=%s, err=%s", sf.Name, err) + } + + return nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stderr, `usage: influxd restore [flags] PATH + +restore uses a snapshot of a data node to rebuild a cluster. + + -config + Set the path to the configuration file. +`) +} + +// Config represents a partial config for rebuilding the server. 
+type Config struct { + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` +} + +type nopListener struct { + closing chan struct{} +} + +func newNopListener() *nopListener { + return &nopListener{make(chan struct{})} +} + +func (ln *nopListener) Accept() (net.Conn, error) { + <-ln.closing + return nil, errors.New("listener closing") +} + +func (ln *nopListener) Close() error { close(ln.closing); return nil } +func (ln *nopListener) Addr() net.Addr { return nil } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go new file mode 100644 index 000000000..6e3143f25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go @@ -0,0 +1,155 @@ +package restore_test + +/* +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + main "github.com/influxdb/influxdb/cmd/influxd" + "github.com/influxdb/influxdb/tsdb" +) + +func newConfig(path string, port int) main.Config { + config := main.NewConfig() + config.Port = port + config.Broker.Enabled = true + config.Broker.Dir = filepath.Join(path, "broker") + + config.Data.Enabled = true + config.Data.Dir = filepath.Join(path, "data") + return *config +} + +// Ensure the restore command can expand a snapshot and bootstrap a broker. +func TestRestoreCommand(t *testing.T) { + if testing.Short() { + t.Skip("skipping TestRestoreCommand") + } + + now := time.Now() + + // Create root path to server. + path := tempfile() + defer os.Remove(path) + + // Parse configuration. + config := newConfig(path, 8900) + + // Start server. + cmd := main.NewRunCommand() + node := cmd.Open(&config, "") + if node.Broker == nil { + t.Fatal("cannot run broker") + } else if node.DataNode == nil { + t.Fatal("cannot run server") + } + b := node.Broker + s := node.DataNode + + // Create data. 
+ if err := s.CreateDatabase("db"); err != nil { + t.Fatalf("cannot create database: %s", err) + } + if index, err := s.WriteSeries("db", "default", []tsdb.Point{tsdb.NewPoint("cpu", nil, map[string]interface{}{"value": float64(100)}, now)}); err != nil { + t.Fatalf("cannot write series: %s", err) + } else if err = s.Sync(1, index); err != nil { + t.Fatalf("shard sync: %s", err) + } + + // Create snapshot writer. + sw, err := s.CreateSnapshotWriter() + if err != nil { + t.Fatalf("create snapshot writer: %s", err) + } + + // Snapshot to file. + sspath := tempfile() + f, err := os.Create(sspath) + if err != nil { + t.Fatal(err) + } + sw.WriteTo(f) + f.Close() + + // Stop server. + node.Close() + + // Remove data & broker directories. + if err := os.RemoveAll(path); err != nil { + t.Fatalf("remove: %s", err) + } + + // Execute the restore. + if err := NewRestoreCommand().Restore(&config, sspath); err != nil { + t.Fatal(err) + } + + // Rewrite config to a new port and re-parse. + config = newConfig(path, 8910) + + // Restart server. + cmd = main.NewRunCommand() + node = cmd.Open(&config, "") + if b == nil { + t.Fatal("cannot run broker") + } else if s == nil { + t.Fatal("cannot run server") + } + b = node.Broker + s = node.DataNode + + // Write new data. + if err := s.CreateDatabase("newdb"); err != nil { + t.Fatalf("cannot create new database: %s", err) + } + if index, err := s.WriteSeries("newdb", "default", []tsdb.Point{tsdb.NewPoint("mem", nil, map[string]interface{}{"value": float64(1000)}, now)}); err != nil { + t.Fatalf("cannot write new series: %s", err) + } else if err = s.Sync(2, index); err != nil { + t.Fatalf("shard sync: %s", err) + } + + // Read series data. + if v, err := s.ReadSeries("db", "default", "cpu", nil, now); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(100)}) { + t.Fatalf("read series(0) mismatch: %#v", v) + } + + // Read new series data. 
+ if v, err := s.ReadSeries("newdb", "default", "mem", nil, now); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(1000)}) { + t.Fatalf("read series(1) mismatch: %#v", v) + } + + // Stop server. + node.Close() +} + +// RestoreCommand is a test wrapper for main.RestoreCommand. +type RestoreCommand struct { + *main.RestoreCommand + Stderr bytes.Buffer +} + +// NewRestoreCommand returns a new instance of RestoreCommand. +func NewRestoreCommand() *RestoreCommand { + cmd := &RestoreCommand{RestoreCommand: main.NewRestoreCommand()} + cmd.RestoreCommand.Stderr = &cmd.Stderr + return cmd +} + +// MustReadFile reads data from a file. Panic on error. +func MustReadFile(filename string) []byte { + b, err := ioutil.ReadFile(filename) + if err != nil { + panic(err.Error()) + } + return b +} +*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go new file mode 100644 index 000000000..021435be2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go @@ -0,0 +1,233 @@ +package run + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml" +) + +const logo = ` + 8888888 .d888 888 8888888b. 888888b. + 888 d88P" 888 888 "Y88b 888 "88b + 888 888 888 888 888 888 .88P + 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. + 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b + 888 888 888 888 888 888 888 X88K 888 888 888 888 + 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P + 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" + +` + +// Command represents the command executed by "influxd run". 
+type Command struct { + Version string + Branch string + Commit string + + closing chan struct{} + Closed chan struct{} + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + Server *Server +} + +// NewCommand return a new instance of Command. +func NewCommand() *Command { + return &Command{ + closing: make(chan struct{}), + Closed: make(chan struct{}), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses the config from args and runs the server. +func (cmd *Command) Run(args ...string) error { + // Parse the command line flags. + options, err := cmd.ParseFlags(args...) + if err != nil { + return err + } + + // Print sweet InfluxDB logo. + fmt.Print(logo) + + // Mark start-up in log. + log.Printf("InfluxDB starting, version %s, branch %s, commit %s", cmd.Version, cmd.Branch, cmd.Commit) + log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)) + + // Write the PID file. + if err := cmd.writePIDFile(options.PIDFile); err != nil { + return fmt.Errorf("write pid file: %s", err) + } + + // Turn on block profiling to debug stuck databases + runtime.SetBlockProfileRate(int(1 * time.Second)) + + // Parse config + config, err := cmd.ParseConfig(options.ConfigPath) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Override config hostname if specified in the command line args. + if options.Hostname != "" { + config.Meta.Hostname = options.Hostname + } + + if options.Join != "" { + config.Meta.Peers = strings.Split(options.Join, ",") + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err) + } + + // Create server from config and start it. 
+ buildInfo := &BuildInfo{Version: cmd.Version, Commit: cmd.Commit, Branch: cmd.Branch} + s, err := NewServer(config, buildInfo) + if err != nil { + return fmt.Errorf("create server: %s", err) + } + s.CPUProfile = options.CPUProfile + s.MemProfile = options.MemProfile + if err := s.Open(); err != nil { + return fmt.Errorf("open server: %s", err) + } + cmd.Server = s + + // Begin monitoring the server's error channel. + go cmd.monitorServerErrors() + + return nil +} + +// Close shuts down the server. +func (cmd *Command) Close() error { + defer close(cmd.Closed) + close(cmd.closing) + if cmd.Server != nil { + return cmd.Server.Close() + } + return nil +} + +func (cmd *Command) monitorServerErrors() { + logger := log.New(cmd.Stderr, "", log.LstdFlags) + for { + select { + case err := <-cmd.Server.Err(): + logger.Println(err) + case <-cmd.closing: + return + } + } +} + +// ParseFlags parses the command line flags from args and returns an options set. +func (cmd *Command) ParseFlags(args ...string) (Options, error) { + var options Options + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ConfigPath, "config", "", "") + fs.StringVar(&options.PIDFile, "pidfile", "", "") + fs.StringVar(&options.Hostname, "hostname", "", "") + fs.StringVar(&options.Join, "join", "", "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } + if err := fs.Parse(args); err != nil { + return Options{}, err + } + return options, nil +} + +// writePIDFile writes the process ID to path. +func (cmd *Command) writePIDFile(path string) error { + // Ignore if path is not set. + if path == "" { + return nil + } + + // Ensure the required directory structure exists. + err := os.MkdirAll(filepath.Dir(path), 0777) + if err != nil { + return fmt.Errorf("mkdir: %s", err) + } + + // Retrieve the PID and write it. 
+ pid := strconv.Itoa(os.Getpid()) + if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { + return fmt.Errorf("write file: %s", err) + } + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. +func (cmd *Command) ParseConfig(path string) (*Config, error) { + // Use demo configuration if no config path is specified. + if path == "" { + log.Println("no configuration provided, using default settings") + return NewDemoConfig() + } + + log.Printf("Using configuration at: %s\n", path) + + config := NewConfig() + if _, err := toml.DecodeFile(path, &config); err != nil { + return nil, err + } + + return config, nil +} + +var usage = `usage: run [flags] + +run starts the broker and data node server. If this is the first time running +the command then a new cluster will be initialized unless the -join argument +is used. + + -config + Set the path to the configuration file. + + -hostname + Override the hostname, the 'hostname' configuration + option will be overridden. + + -join + Joins the server to an existing cluster. + + -pidfile + Write process ID to a file. +` + +// Options represents the command line options that can be parsed. 
+type Options struct { + ConfigPath string + PIDFile string + Hostname string + Join string + CPUProfile string + MemProfile string +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go new file mode 100644 index 000000000..0b35fd95b --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go @@ -0,0 +1,225 @@ +package run + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "reflect" + "strconv" + "strings" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/monitor" + "github.com/influxdb/influxdb/services/admin" + "github.com/influxdb/influxdb/services/collectd" + "github.com/influxdb/influxdb/services/continuous_querier" + "github.com/influxdb/influxdb/services/graphite" + "github.com/influxdb/influxdb/services/hh" + "github.com/influxdb/influxdb/services/httpd" + "github.com/influxdb/influxdb/services/opentsdb" + "github.com/influxdb/influxdb/services/precreator" + "github.com/influxdb/influxdb/services/retention" + "github.com/influxdb/influxdb/services/udp" + "github.com/influxdb/influxdb/tsdb" +) + +// Config represents the configuration format for the influxd binary. 
+type Config struct { + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` + Cluster cluster.Config `toml:"cluster"` + Retention retention.Config `toml:"retention"` + Precreator precreator.Config `toml:"shard-precreation"` + + Admin admin.Config `toml:"admin"` + Monitor monitor.Config `toml:"monitor"` + HTTPD httpd.Config `toml:"http"` + Graphites []graphite.Config `toml:"graphite"` + Collectd collectd.Config `toml:"collectd"` + OpenTSDB opentsdb.Config `toml:"opentsdb"` + UDPs []udp.Config `toml:"udp"` + + // Snapshot SnapshotConfig `toml:"snapshot"` + ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` + + HintedHandoff hh.Config `toml:"hinted-handoff"` + + // Server reporting + ReportingDisabled bool `toml:"reporting-disabled"` +} + +// NewConfig returns an instance of Config with reasonable defaults. +func NewConfig() *Config { + c := &Config{} + c.Meta = meta.NewConfig() + c.Data = tsdb.NewConfig() + c.Cluster = cluster.NewConfig() + c.Precreator = precreator.NewConfig() + + c.Admin = admin.NewConfig() + c.Monitor = monitor.NewConfig() + c.HTTPD = httpd.NewConfig() + c.Collectd = collectd.NewConfig() + c.OpenTSDB = opentsdb.NewConfig() + + c.ContinuousQuery = continuous_querier.NewConfig() + c.Retention = retention.NewConfig() + c.HintedHandoff = hh.NewConfig() + + return c +} + +// NewDemoConfig returns the config that runs when no config is specified. 
+func NewDemoConfig() (*Config, error) { + c := NewConfig() + + var homeDir string + // By default, store meta and data files in current users home directory + u, err := user.Current() + if err == nil { + homeDir = u.HomeDir + } else if os.Getenv("HOME") != "" { + homeDir = os.Getenv("HOME") + } else { + return nil, fmt.Errorf("failed to determine current user for storage") + } + + c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") + c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") + c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh") + c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") + + c.Admin.Enabled = true + + return c, nil +} + +// Validate returns an error if the config is invalid. +func (c *Config) Validate() error { + if c.Meta.Dir == "" { + return errors.New("Meta.Dir must be specified") + } else if c.Data.Dir == "" { + return errors.New("Data.Dir must be specified") + } else if c.HintedHandoff.Dir == "" { + return errors.New("HintedHandoff.Dir must be specified") + } else if c.Data.WALDir == "" { + return errors.New("Data.WALDir must be specified") + } + + for _, g := range c.Graphites { + if err := g.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) + } + } + return nil +} + +func (c *Config) ApplyEnvOverrides() error { + return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c)) +} + +func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error { + // If we have a pointer, dereference it + s := spec + if spec.Kind() == reflect.Ptr { + s = spec.Elem() + } + + // Make sure we have struct + if s.Kind() != reflect.Struct { + return nil + } + + typeOfSpec := s.Type() + for i := 0; i < s.NumField(); i++ { + f := s.Field(i) + // Get the toml tag to determine what env var name to use + configName := typeOfSpec.Field(i).Tag.Get("toml") + // Replace hyphens with underscores to avoid issues with shells + configName = strings.Replace(configName, "-", "_", -1) + fieldName := 
typeOfSpec.Field(i).Name + + // Skip any fields that we cannot set + if f.CanSet() || f.Kind() == reflect.Slice { + + // Use the upper-case prefix and toml name for the env var + key := strings.ToUpper(configName) + if prefix != "" { + key = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) + } + value := os.Getenv(key) + + // If the type is s slice, apply to each using the index as a suffix + // e.g. GRAPHITE_0 + if f.Kind() == reflect.Slice || f.Kind() == reflect.Array { + for i := 0; i < f.Len(); i++ { + if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil { + return err + } + } + continue + } + + // If it's a sub-config, recursively apply + if f.Kind() == reflect.Struct || f.Kind() == reflect.Ptr { + if err := c.applyEnvOverrides(key, f); err != nil { + return err + } + continue + } + + // Skip any fields we don't have a value to set + if value == "" { + continue + } + + switch f.Kind() { + case reflect.String: + f.SetString(value) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + var intValue int64 + + // Handle toml.Duration + if f.Type().Name() == "Duration" { + dur, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value) + } + intValue = dur.Nanoseconds() + } else { + var err error + intValue, err = strconv.ParseInt(value, 0, f.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value) + } + } + + f.SetInt(intValue) + case reflect.Bool: + boolValue, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value) + + } + f.SetBool(boolValue) + case reflect.Float32, reflect.Float64: + floatValue, err := strconv.ParseFloat(value, f.Type().Bits()) + if err != nil { + return 
fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value) + + } + f.SetFloat(floatValue) + default: + if err := c.applyEnvOverrides(key, f); err != nil { + return err + } + } + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go new file mode 100644 index 000000000..364cf61b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go @@ -0,0 +1,83 @@ +package run + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/BurntSushi/toml" +) + +// PrintConfigCommand represents the command executed by "influxd config". +type PrintConfigCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPrintConfigCommand return a new instance of PrintConfigCommand. +func NewPrintConfigCommand() *PrintConfigCommand { + return &PrintConfigCommand{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses and prints the current config loaded. +func (cmd *PrintConfigCommand) Run(args ...string) error { + // Parse command flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + configPath := fs.String("config", "", "") + hostname := fs.String("hostname", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Parse config from path. + config, err := cmd.parseConfig(*configPath) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Override config properties. + if *hostname != "" { + config.Meta.Hostname = *hostname + } + + // Validate the configuration. 
+ if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err) + } + + toml.NewEncoder(cmd.Stdout).Encode(config) + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. +func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { + if path == "" { + return NewDemoConfig() + } + + config := NewConfig() + if _, err := toml.DecodeFile(path, &config); err != nil { + return nil, err + } + return config, nil +} + +var printConfigUsage = `usage: config + + config displays the default configuration +` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go new file mode 100644 index 000000000..7ecbb4f65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go @@ -0,0 +1,142 @@ +package run_test + +import ( + "os" + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/cmd/influxd/run" +) + +// Ensure the configuration can be parsed. +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c run.Config + if _, err := toml.Decode(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[cluster] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[collectd] +bind-address = ":1000" + +[opentsdb] +bind-address = ":2000" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[continuous_queries] +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+ if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.Admin.BindAddress != ":8083" { + t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.Graphites) != 2 { + t.Fatalf("unexpected graphites count: %d", len(c.Graphites)) + } else if c.Graphites[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) + } else if c.Graphites[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) + } else if c.Collectd.BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress) + } else if c.OpenTSDB.BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress) + } else if c.UDPs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + } else if c.ContinuousQuery.Enabled != true { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } +} + +// Ensure the configuration can be parsed. +func TestConfig_Parse_EnvOverride(t *testing.T) { + // Parse configuration. 
+ var c run.Config + if _, err := toml.Decode(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[cluster] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[collectd] +bind-address = ":1000" + +[opentsdb] +bind-address = ":2000" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[continuous_queries] +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := c.ApplyEnvOverrides(); err != nil { + t.Fatalf("failed to apply env overrides: %v", err) + } + + if c.UDPs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + } + + if c.Graphites[1].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go new file mode 100644 index 000000000..068868b36 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go @@ -0,0 +1,585 @@ +package run + +import ( + "bytes" + "fmt" + "log" + "net" + "net/http" + "os" + "runtime" + "runtime/pprof" + "strings" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/monitor" + "github.com/influxdb/influxdb/services/admin" + "github.com/influxdb/influxdb/services/collectd" + "github.com/influxdb/influxdb/services/continuous_querier" + "github.com/influxdb/influxdb/services/copier" + "github.com/influxdb/influxdb/services/graphite" + "github.com/influxdb/influxdb/services/hh" + 
"github.com/influxdb/influxdb/services/httpd" + "github.com/influxdb/influxdb/services/opentsdb" + "github.com/influxdb/influxdb/services/precreator" + "github.com/influxdb/influxdb/services/retention" + "github.com/influxdb/influxdb/services/snapshotter" + "github.com/influxdb/influxdb/services/udp" + "github.com/influxdb/influxdb/tcp" + "github.com/influxdb/influxdb/tsdb" + _ "github.com/influxdb/influxdb/tsdb/engine" +) + +// BuildInfo represents the build details for the server code. +type BuildInfo struct { + Version string + Commit string + Branch string +} + +// Server represents a container for the metadata and storage data and services. +// It is built using a Config and it manages the startup and shutdown of all +// services in the proper order. +type Server struct { + buildInfo BuildInfo + + err chan error + closing chan struct{} + + Hostname string + BindAddress string + Listener net.Listener + + MetaStore *meta.Store + TSDBStore *tsdb.Store + QueryExecutor *tsdb.QueryExecutor + PointsWriter *cluster.PointsWriter + ShardWriter *cluster.ShardWriter + ShardMapper *cluster.ShardMapper + HintedHandoff *hh.Service + + Services []Service + + // These references are required for the tcp muxer. + ClusterService *cluster.Service + SnapshotterService *snapshotter.Service + CopierService *copier.Service + + Monitor *monitor.Monitor + + // Server reporting + reportingDisabled bool + + // Profiling + CPUProfile string + MemProfile string +} + +// NewServer returns a new instance of Server built from a config. +func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { + // Construct base meta store and data store. 
+ tsdbStore := tsdb.NewStore(c.Data.Dir) + tsdbStore.EngineOptions.Config = c.Data + + s := &Server{ + buildInfo: *buildInfo, + err: make(chan error), + closing: make(chan struct{}), + + Hostname: c.Meta.Hostname, + BindAddress: c.Meta.BindAddress, + + MetaStore: meta.NewStore(c.Meta), + TSDBStore: tsdbStore, + + Monitor: monitor.New(c.Monitor), + + reportingDisabled: c.ReportingDisabled, + } + + // Copy TSDB configuration. + s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize + s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval) + s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay) + + // Set the shard mapper + s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout)) + s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping + s.ShardMapper.MetaStore = s.MetaStore + s.ShardMapper.TSDBStore = s.TSDBStore + + // Initialize query executor. + s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore) + s.QueryExecutor.MetaStore = s.MetaStore + s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore} + s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor} + s.QueryExecutor.ShardMapper = s.ShardMapper + + // Set the shard writer + s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout)) + s.ShardWriter.MetaStore = s.MetaStore + + // Create the hinted handoff service + s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter) + + // Initialize points writer. 
+ s.PointsWriter = cluster.NewPointsWriter() + s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout) + s.PointsWriter.MetaStore = s.MetaStore + s.PointsWriter.TSDBStore = s.TSDBStore + s.PointsWriter.ShardWriter = s.ShardWriter + s.PointsWriter.HintedHandoff = s.HintedHandoff + + // Initialize the monitor + s.Monitor.Version = s.buildInfo.Version + s.Monitor.Commit = s.buildInfo.Commit + s.Monitor.Branch = s.buildInfo.Branch + s.Monitor.MetaStore = s.MetaStore + s.Monitor.PointsWriter = s.PointsWriter + + // Append services. + s.appendClusterService(c.Cluster) + s.appendPrecreatorService(c.Precreator) + s.appendSnapshotterService() + s.appendCopierService() + s.appendAdminService(c.Admin) + s.appendContinuousQueryService(c.ContinuousQuery) + s.appendHTTPDService(c.HTTPD) + s.appendCollectdService(c.Collectd) + if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil { + return nil, err + } + for _, g := range c.UDPs { + s.appendUDPService(g) + } + s.appendRetentionPolicyService(c.Retention) + for _, g := range c.Graphites { + if err := s.appendGraphiteService(g); err != nil { + return nil, err + } + } + + return s, nil +} + +func (s *Server) appendClusterService(c cluster.Config) { + srv := cluster.NewService(c) + srv.TSDBStore = s.TSDBStore + srv.MetaStore = s.MetaStore + s.Services = append(s.Services, srv) + s.ClusterService = srv +} + +func (s *Server) appendSnapshotterService() { + srv := snapshotter.NewService() + srv.TSDBStore = s.TSDBStore + srv.MetaStore = s.MetaStore + s.Services = append(s.Services, srv) + s.SnapshotterService = srv +} + +func (s *Server) appendCopierService() { + srv := copier.NewService() + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) + s.CopierService = srv +} + +func (s *Server) appendRetentionPolicyService(c retention.Config) { + if !c.Enabled { + return + } + srv := retention.NewService(c) + srv.MetaStore = s.MetaStore + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) +} + 
+func (s *Server) appendAdminService(c admin.Config) { + if !c.Enabled { + return + } + srv := admin.NewService(c) + s.Services = append(s.Services, srv) +} + +func (s *Server) appendHTTPDService(c httpd.Config) { + if !c.Enabled { + return + } + srv := httpd.NewService(c) + srv.Handler.MetaStore = s.MetaStore + srv.Handler.QueryExecutor = s.QueryExecutor + srv.Handler.PointsWriter = s.PointsWriter + srv.Handler.Version = s.buildInfo.Version + + // If a ContinuousQuerier service has been started, attach it. + for _, srvc := range s.Services { + if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok { + srv.Handler.ContinuousQuerier = cqsrvc + } + } + + s.Services = append(s.Services, srv) +} + +func (s *Server) appendCollectdService(c collectd.Config) { + if !c.Enabled { + return + } + srv := collectd.NewService(c) + srv.MetaStore = s.MetaStore + srv.PointsWriter = s.PointsWriter + s.Services = append(s.Services, srv) +} + +func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { + if !c.Enabled { + return nil + } + srv, err := opentsdb.NewService(c) + if err != nil { + return err + } + srv.PointsWriter = s.PointsWriter + srv.MetaStore = s.MetaStore + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendGraphiteService(c graphite.Config) error { + if !c.Enabled { + return nil + } + srv, err := graphite.NewService(c) + if err != nil { + return err + } + + srv.PointsWriter = s.PointsWriter + srv.MetaStore = s.MetaStore + srv.Monitor = s.Monitor + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendPrecreatorService(c precreator.Config) error { + if !c.Enabled { + return nil + } + srv, err := precreator.NewService(c) + if err != nil { + return err + } + + srv.MetaStore = s.MetaStore + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendUDPService(c udp.Config) { + if !c.Enabled { + return + } + srv := udp.NewService(c) + srv.PointsWriter = s.PointsWriter + s.Services = 
append(s.Services, srv) +} + +func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { + if !c.Enabled { + return + } + srv := continuous_querier.NewService(c) + srv.MetaStore = s.MetaStore + srv.QueryExecutor = s.QueryExecutor + srv.PointsWriter = s.PointsWriter + s.Services = append(s.Services, srv) +} + +// Err returns an error channel that multiplexes all out of band errors received from all services. +func (s *Server) Err() <-chan error { return s.err } + +// Open opens the meta and data store and all services. +func (s *Server) Open() error { + if err := func() error { + // Start profiling, if set. + startProfile(s.CPUProfile, s.MemProfile) + + host, port, err := s.hostAddr() + if err != nil { + return err + } + + hostport := net.JoinHostPort(host, port) + addr, err := net.ResolveTCPAddr("tcp", hostport) + if err != nil { + return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) + } + s.MetaStore.Addr = addr + s.MetaStore.RemoteAddr = &tcpaddr{hostport} + + // Open shared TCP connection. + ln, err := net.Listen("tcp", s.BindAddress) + if err != nil { + return fmt.Errorf("listen: %s", err) + } + s.Listener = ln + + // The port 0 is used, we need to retrieve the port assigned by the kernel + if strings.HasSuffix(s.BindAddress, ":0") { + s.MetaStore.Addr = ln.Addr() + } + + // Multiplex listener. + mux := tcp.NewMux() + s.MetaStore.RaftListener = mux.Listen(meta.MuxRaftHeader) + s.MetaStore.ExecListener = mux.Listen(meta.MuxExecHeader) + s.MetaStore.RPCListener = mux.Listen(meta.MuxRPCHeader) + + s.ClusterService.Listener = mux.Listen(cluster.MuxHeader) + s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) + s.CopierService.Listener = mux.Listen(copier.MuxHeader) + go mux.Serve(ln) + + // Open meta store. + if err := s.MetaStore.Open(); err != nil { + return fmt.Errorf("open meta store: %s", err) + } + go s.monitorErrorChan(s.MetaStore.Err()) + + // Wait for the store to initialize. 
+ <-s.MetaStore.Ready() + + if err := s.Monitor.Open(); err != nil { + return fmt.Errorf("open monitor: %v", err) + } + + // Open TSDB store. + if err := s.TSDBStore.Open(); err != nil { + return fmt.Errorf("open tsdb store: %s", err) + } + + // Open the hinted handoff service + if err := s.HintedHandoff.Open(); err != nil { + return fmt.Errorf("open hinted handoff: %s", err) + } + + for _, service := range s.Services { + if err := service.Open(); err != nil { + return fmt.Errorf("open service: %s", err) + } + } + + // Start the reporting service, if not disabled. + if !s.reportingDisabled { + go s.startServerReporting() + } + + return nil + + }(); err != nil { + s.Close() + return err + } + + return nil +} + +// Close shuts down the meta and data stores and all services. +func (s *Server) Close() error { + stopProfile() + + // Close the listener first to stop any new connections + if s.Listener != nil { + s.Listener.Close() + } + + // Close services to allow any inflight requests to complete + // and prevent new requests from being accepted. + for _, service := range s.Services { + service.Close() + } + + if s.Monitor != nil { + s.Monitor.Close() + } + + if s.HintedHandoff != nil { + s.HintedHandoff.Close() + } + + // Close the TSDBStore, no more reads or writes at this point + if s.TSDBStore != nil { + s.TSDBStore.Close() + } + + // Finally close the meta-store since everything else depends on it + if s.MetaStore != nil { + s.MetaStore.Close() + } + + close(s.closing) + return nil +} + +// startServerReporting starts periodic server reporting. +func (s *Server) startServerReporting() { + for { + select { + case <-s.closing: + return + default: + } + if err := s.MetaStore.WaitForLeader(30 * time.Second); err != nil { + log.Printf("no leader available for reporting: %s", err.Error()) + time.Sleep(time.Second) + continue + } + s.reportServer() + <-time.After(24 * time.Hour) + } +} + +// reportServer reports anonymous statistics about the system. 
+func (s *Server) reportServer() { + dis, err := s.MetaStore.Databases() + if err != nil { + log.Printf("failed to retrieve databases for reporting: %s", err.Error()) + return + } + numDatabases := len(dis) + + numMeasurements := 0 + numSeries := 0 + for _, di := range dis { + d := s.TSDBStore.DatabaseIndex(di.Name) + if d == nil { + // No data in this store for this database. + continue + } + m, s := d.MeasurementSeriesCounts() + numMeasurements += m + numSeries += s + } + + clusterID, err := s.MetaStore.ClusterID() + if err != nil { + log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error()) + return + } + + json := fmt.Sprintf(`[{ + "name":"reports", + "columns":["os", "arch", "version", "server_id", "cluster_id", "num_series", "num_measurements", "num_databases"], + "points":[["%s", "%s", "%s", "%x", "%x", "%d", "%d", "%d"]] + }]`, runtime.GOOS, runtime.GOARCH, s.buildInfo.Version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases) + + data := bytes.NewBufferString(json) + + log.Printf("Sending anonymous usage statistics to m.influxdb.com") + + client := http.Client{Timeout: time.Duration(5 * time.Second)} + go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data) +} + +// monitorErrorChan reads an error channel and resends it through the server. +func (s *Server) monitorErrorChan(ch <-chan error) { + for { + select { + case err, ok := <-ch: + if !ok { + return + } + s.err <- err + case <-s.closing: + return + } + } +} + +// hostAddr returns the host and port that remote nodes will use to reach this +// node. +func (s *Server) hostAddr() (string, string, error) { + // Resolve host to address. 
+ _, port, err := net.SplitHostPort(s.BindAddress) + if err != nil { + return "", "", fmt.Errorf("split bind address: %s", err) + } + + host := s.Hostname + + // See if we might have a port that will override the BindAddress port + if host != "" && host[len(host)-1] >= '0' && host[len(host)-1] <= '9' && strings.Contains(host, ":") { + hostArg, portArg, err := net.SplitHostPort(s.Hostname) + if err != nil { + return "", "", err + } + + if hostArg != "" { + host = hostArg + } + + if portArg != "" { + port = portArg + } + } + return host, port, nil +} + +// Service represents a service attached to the server. +type Service interface { + Open() error + Close() error +} + +// prof stores the file locations of active profiles. +var prof struct { + cpu *os.File + mem *os.File +} + +// StartProfile initializes the cpu and memory profile, if specified. +func startProfile(cpuprofile, memprofile string) { + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatalf("cpuprofile: %v", err) + } + log.Printf("writing CPU profile to: %s\n", cpuprofile) + prof.cpu = f + pprof.StartCPUProfile(prof.cpu) + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatalf("memprofile: %v", err) + } + log.Printf("writing mem profile to: %s\n", memprofile) + prof.mem = f + runtime.MemProfileRate = 4096 + } + +} + +// StopProfile closes the cpu and memory profiles if they are running. 
+func stopProfile() { + if prof.cpu != nil { + pprof.StopCPUProfile() + prof.cpu.Close() + log.Println("CPU profile stopped") + } + if prof.mem != nil { + pprof.Lookup("heap").WriteTo(prof.mem, 0) + prof.mem.Close() + log.Println("mem profile stopped") + } +} + +type tcpaddr struct{ host string } + +func (a *tcpaddr) Network() string { return "tcp" } +func (a *tcpaddr) String() string { return a.host } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go new file mode 100644 index 000000000..aeb81826d --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go @@ -0,0 +1,356 @@ +// This package is a set of convenience helpers and structs to make integration testing easier +package run_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/influxdb/influxdb/cmd/influxd/run" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/services/httpd" + "github.com/influxdb/influxdb/toml" +) + +// Server represents a test wrapper for run.Server. +type Server struct { + *run.Server + Config *run.Config +} + +// NewServer returns a new instance of Server. +func NewServer(c *run.Config) *Server { + buildInfo := &run.BuildInfo{ + Version: "testServer", + Commit: "testCommit", + Branch: "testBranch", + } + srv, _ := run.NewServer(c, buildInfo) + s := Server{ + Server: srv, + Config: c, + } + s.TSDBStore.EngineOptions.Config = c.Data + configureLogging(&s) + return &s +} + +// OpenServer opens a test server. 
+func OpenServer(c *run.Config, joinURLs string) *Server { + s := NewServer(c) + configureLogging(s) + if err := s.Open(); err != nil { + panic(err.Error()) + } + + return s +} + +// OpenServerWithVersion opens a test server with a specific version. +func OpenServerWithVersion(c *run.Config, version string) *Server { + buildInfo := &run.BuildInfo{ + Version: version, + Commit: "", + Branch: "", + } + srv, _ := run.NewServer(c, buildInfo) + s := Server{ + Server: srv, + Config: c, + } + configureLogging(&s) + if err := s.Open(); err != nil { + panic(err.Error()) + } + + return &s +} + +// Close shuts down the server and removes all temporary paths. +func (s *Server) Close() { + os.RemoveAll(s.Config.Meta.Dir) + os.RemoveAll(s.Config.Data.Dir) + os.RemoveAll(s.Config.HintedHandoff.Dir) + s.Server.Close() +} + +// URL returns the base URL for the httpd endpoint. +func (s *Server) URL() string { + for _, service := range s.Services { + if service, ok := service.(*httpd.Service); ok { + return "http://" + service.Addr().String() + } + } + panic("httpd server not found in services") +} + +// CreateDatabaseAndRetentionPolicy will create the database and retention policy. +func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error { + if _, err := s.MetaStore.CreateDatabase(db); err != nil { + return err + } else if _, err := s.MetaStore.CreateRetentionPolicy(db, rp); err != nil { + return err + } + return nil +} + +// Query executes a query against the server and returns the results. +func (s *Server) Query(query string) (results string, err error) { + return s.QueryWithParams(query, nil) +} + +// Query executes a query against the server and returns the results. +func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) { + if values == nil { + values = url.Values{} + } + values.Set("q", query) + return s.HTTPGet(s.URL() + "/query?" 
+ values.Encode()) +} + +// HTTPGet makes an HTTP GET request to the server and returns the response. +func (s *Server) HTTPGet(url string) (results string, err error) { + resp, err := http.Get(url) + if err != nil { + return "", err + } + body := string(MustReadAll(resp.Body)) + switch resp.StatusCode { + case http.StatusBadRequest: + if !expectPattern(".*error parsing query*.", body) { + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } + return body, nil + case http.StatusOK: + return body, nil + default: + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } +} + +// HTTPPost makes an HTTP POST request to the server and returns the response. +func (s *Server) HTTPPost(url string, content []byte) (results string, err error) { + buf := bytes.NewBuffer(content) + resp, err := http.Post(url, "application/json", buf) + if err != nil { + return "", err + } + body := string(MustReadAll(resp.Body)) + switch resp.StatusCode { + case http.StatusBadRequest: + if !expectPattern(".*error parsing query*.", body) { + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } + return body, nil + case http.StatusOK, http.StatusNoContent: + return body, nil + default: + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } +} + +// Write executes a write against the server and returns the results. 
+func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) { + if params == nil { + params = url.Values{} + } + if params.Get("db") == "" { + params.Set("db", db) + } + if params.Get("rp") == "" { + params.Set("rp", rp) + } + resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body)) + if err != nil { + return "", err + } else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body)) + } + return string(MustReadAll(resp.Body)), nil +} + +// NewConfig returns the default config with temporary paths. +func NewConfig() *run.Config { + c := run.NewConfig() + c.ReportingDisabled = true + c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second) + c.Cluster.WriteTimeout = toml.Duration(30 * time.Second) + c.Meta.Dir = MustTempFile() + c.Meta.BindAddress = "127.0.0.1:0" + c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond) + + c.Data.Dir = MustTempFile() + c.Data.WALDir = MustTempFile() + c.Data.WALLoggingEnabled = false + + c.HintedHandoff.Dir = MustTempFile() + + c.HTTPD.Enabled = true + c.HTTPD.BindAddress = "127.0.0.1:0" + c.HTTPD.LogEnabled = testing.Verbose() + + c.Monitor.StoreEnabled = false + + return c +} + +func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo { + return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration} +} + +func maxFloat64() string { + maxFloat64, _ := json.Marshal(math.MaxFloat64) + return string(maxFloat64) +} + +func maxInt64() string { + maxInt64, _ := json.Marshal(^int64(0)) + return string(maxInt64) +} + +func now() time.Time { + return time.Now().UTC() +} + +func yesterday() 
time.Time { + return now().Add(-1 * time.Hour * 24) +} + +func mustParseTime(layout, value string) time.Time { + tm, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return tm +} + +// MustReadAll reads r. Panic on error. +func MustReadAll(r io.Reader) []byte { + b, err := ioutil.ReadAll(r) + if err != nil { + panic(err) + } + return b +} + +// MustTempFile returns a path to a temporary file. +func MustTempFile() string { + f, err := ioutil.TempFile("", "influxd-") + if err != nil { + panic(err) + } + f.Close() + os.Remove(f.Name()) + return f.Name() +} + +func expectPattern(exp, act string) bool { + re := regexp.MustCompile(exp) + if !re.MatchString(act) { + return false + } + return true +} + +type Query struct { + name string + command string + params url.Values + exp, act string + pattern bool + skip bool + repeat int +} + +// Execute runs the command and returns an err if it fails +func (q *Query) Execute(s *Server) (err error) { + if q.params == nil { + q.act, err = s.Query(q.command) + return + } + q.act, err = s.QueryWithParams(q.command, q.params) + return +} + +func (q *Query) success() bool { + if q.pattern { + return expectPattern(q.exp, q.act) + } + return q.exp == q.act +} + +func (q *Query) Error(err error) string { + return fmt.Sprintf("%s: %v", q.name, err) +} + +func (q *Query) failureMessage() string { + return fmt.Sprintf("%s: unexpected results\nquery: %s\nexp: %s\nactual: %s\n", q.name, q.command, q.exp, q.act) +} + +type Test struct { + initialized bool + write string + params url.Values + db string + rp string + exp string + queries []*Query +} + +func NewTest(db, rp string) Test { + return Test{ + db: db, + rp: rp, + } +} + +func (t *Test) addQueries(q ...*Query) { + t.queries = append(t.queries, q...) 
+} + +func (t *Test) init(s *Server) error { + if t.write == "" || t.initialized { + return nil + } + t.initialized = true + if res, err := s.Write(t.db, t.rp, t.write, t.params); err != nil { + return err + } else if t.exp != res { + return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res) + } + return nil +} + +func configureLogging(s *Server) { + // Set the logger to discard unless verbose is on + if !testing.Verbose() { + type logSetter interface { + SetLogger(*log.Logger) + } + nullLogger := log.New(ioutil.Discard, "", 0) + s.MetaStore.Logger = nullLogger + s.TSDBStore.Logger = nullLogger + s.HintedHandoff.SetLogger(nullLogger) + s.Monitor.SetLogger(nullLogger) + s.QueryExecutor.SetLogger(nullLogger) + for _, service := range s.Services { + if service, ok := service.(logSetter); ok { + service.SetLogger(nullLogger) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go new file mode 100644 index 000000000..6639f378f --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go @@ -0,0 +1,4374 @@ +package run_test + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "testing" + "time" +) + +// Ensure that HTTP responses include the InfluxDB version. +func TestServer_HTTPResponseVersion(t *testing.T) { + version := "v1234" + s := OpenServerWithVersion(NewConfig(), version) + defer s.Close() + + resp, _ := http.Get(s.URL() + "/query") + got := resp.Header.Get("X-Influxdb-Version") + if got != version { + t.Errorf("Server responded with incorrect version, exp %s, got %s", version, got) + } +} + +// Ensure the database commands work. 
+func TestServer_DatabaseCommands(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := Test{ + queries: []*Query{ + &Query{ + name: "create database should succeed", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create database should error with bad name", + command: `CREATE DATABASE 0xdb0`, + exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`, + }, + &Query{ + name: "show database should succeed", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`, + }, + &Query{ + name: "create database should error if it already exists", + command: `CREATE DATABASE db0`, + exp: `{"results":[{"error":"database already exists"}]}`, + }, + &Query{ + name: "create database should not error with existing database with IF NOT EXISTS", + command: `CREATE DATABASE IF NOT EXISTS db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create database should create non-existing database with IF NOT EXISTS", + command: `CREATE DATABASE IF NOT EXISTS db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show database should succeed", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`, + }, + &Query{ + name: "drop database db0 should succeed", + command: `DROP DATABASE db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "drop database db1 should succeed", + command: `DROP DATABASE db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show database should have no results", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`, + }, + &Query{ + name: "drop database should error if it doesn't exist", + command: `DROP DATABASE db0`, + exp: `{"results":[{"error":"database not found: db0"}]}`, + }, + }, + } + + for _, query := range test.queries { + if query.skip { + 
t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "Drop database after data write", + command: `DROP DATABASE db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Recreate database", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Recreate retention policy", + command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Show measurements after recreate", + command: `SHOW MEASUREMENTS`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data after recreate", + command: `SELECT * FROM cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropDatabaseIsolated(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "Query data from 1st database", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data from 1st database with GROUP BY *", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop other database", + command: `DROP DATABASE db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Query data from 1st database and ensure it's still there", + command: `SELECT * FROM cpu`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data from 1st database and ensure it's still there with GROUP BY *", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropAndRecreateSeries(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "Show series is present", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series after data write", + command: `DROP SERIES FROM cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Show series is 
gone", + command: `SHOW SERIES`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + + // Re-write data and test again. + reTest := NewTest("db0", "rp0") + reTest.write = strings.Join(writes, "\n") + + reTest.addQueries([]*Query{ + &Query{ + name: "Show series is present again after re-write", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range reTest.queries { + if i == 0 { + if err := reTest.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure retention policy commands work. +func TestServer_RetentionPolicyCommands(t *testing.T) { + t.Parallel() + c := NewConfig() + c.Meta.RetentionAutoCreate = false + s := OpenServer(c, "") + defer s.Close() + + // Create a database. 
+ if _, err := s.MetaStore.CreateDatabase("db0"); err != nil { + t.Fatal(err) + } + + test := Test{ + queries: []*Query{ + &Query{ + name: "create retention policy should succeed", + command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create retention policy should error if it already exists", + command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{"error":"retention policy already exists"}]}`, + }, + &Query{ + name: "show retention policy should succeed", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`, + }, + &Query{ + name: "alter retention policy should succeed", + command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show retention policy should have new altered information", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "dropping default retention policy should not succeed", + command: `DROP RETENTION POLICY rp0 ON db0`, + exp: `{"results":[{"error":"retention policy is default"}]}`, + }, + &Query{ + name: "show retention policy should still show policy", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "create a second non-default retention policy", + command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show retention policy should show both", + command: `SHOW RETENTION POLICIES ON db0`, + exp: 
`{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true],["rp2","1h0m0s",1,false]]}]}]}`, + }, + &Query{ + name: "dropping non-default retention policy succeed", + command: `DROP RETENTION POLICY rp2 ON db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show retention policy should show just default", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "Ensure retention policy with unacceptable retention cannot be created", + command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1s REPLICATION 1`, + exp: `{"results":[{"error":"retention policy duration must be at least 1h0m0s"}]}`, + }, + &Query{ + name: "Check error when deleting retention policy on non-existent database", + command: `DROP RETENTION POLICY rp1 ON mydatabase`, + exp: `{"results":[{"error":"database not found"}]}`, + }, + }, + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the autocreation of retention policy works. 
+func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := Test{ + queries: []*Query{ + &Query{ + name: "create database should succeed", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show retention policies should return auto-created policy", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`, + }, + }, + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure user commands work. +func TestServer_UserCommands(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // Create a database. + if _, err := s.MetaStore.CreateDatabase("db0"); err != nil { + t.Fatal(err) + } + + test := Test{ + queries: []*Query{ + &Query{ + name: "show users, no actual users", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, + }, + &Query{ + name: `create user`, + command: "CREATE USER jdoe WITH PASSWORD '1337'", + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show users, 1 existing user", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`, + }, + &Query{ + name: "grant all priviledges to jdoe", + command: `GRANT ALL PRIVILEGES TO jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show users, existing user as admin", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`, + }, + &Query{ + name: "grant DB privileges to user", + command: `GRANT READ ON db0 TO jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "revoke all 
privileges", + command: `REVOKE ALL PRIVILEGES FROM jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "bad create user request", + command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`, + exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`, + }, + &Query{ + name: "bad create user request, no name", + command: `CREATE USER WITH PASSWORD pwd1337`, + exp: `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`, + }, + &Query{ + name: "bad create user request, no password", + command: `CREATE USER jdoe`, + exp: `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`, + }, + &Query{ + name: "drop user", + command: `DROP USER jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "make sure user was dropped", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, + }, + &Query{ + name: "delete non existing user", + command: `DROP USER noone`, + exp: `{"results":[{"error":"user not found"}]}`, + }, + }, + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(fmt.Sprintf("command: %s - err: %s", query.command, query.Error(err))) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can create a single point via json protocol and read it back. 
+func TestServer_Write_JSON(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with float type and read it back. +func TestServer_Write_LineProtocol_Float(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. 
+ if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with bool type and read it back. +func TestServer_Write_LineProtocol_Bool(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with string type and read it back. 
+func TestServer_Write_LineProtocol_String(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with integer type and read it back. +func TestServer_Write_LineProtocol_Integer(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. 
+ if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can query with default databases (via param) and default retention policy +func TestServer_Query_DefaultDBAndRP(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()) + + test.addQueries([]*Query{ + &Query{ + name: "default db and rp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "default rp exists", + command: `show retention policies ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,false],["rp0","1h0m0s",1,true]]}]}]}`, + }, + &Query{ + name: "default rp", + command: `SELECT * FROM db0..cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "default dp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM rp0.cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can have a database with multiple measurements. +func TestServer_Query_Multiple_Measurements(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + // Make sure we do writes for measurements that will span across shards + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, + }, + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server correctly supports data with identical tag values. +func TestServer_Query_IdenticalTagValues(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "measurements with identical tag values - SELECT *, no GROUP BY", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, + }, + &Query{ + name: "measurements with identical tag values - SELECT *, with GROUP BY", + command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`, + exp: 
`{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, + }, + &Query{ + name: "measurements with identical tag values - SELECT value no GROUP BY", + command: `SELECT value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle a query that involves accessing no shards. +func TestServer_Query_NoShards(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) + + test.addQueries([]*Query{ + &Query{ + name: "selecting value should succeed", + command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, + exp: `{"results":[{}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query a non-existent field +func TestServer_Query_NonExistent(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) + + test.addQueries([]*Query{ + &Query{ + name: "selecting value should succeed", + command: `SELECT value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting non-existent should succeed", + command: `SELECT foo FROM db0.rp0.cpu`, + exp: `{"results":[{}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can perform basic math +func TestServer_Query_Math(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db", newRetentionPolicyInfo("rp", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + writes := []string{ + "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), + "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), + } + + test := NewTest("db", "rp") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "SELECT multiple of float value", + command: `SELECT value * 2 from db.rp.float`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT multiple of float value", + command: `SELECT 2 * value from db.rp.float`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT multiple of integer value", + command: `SELECT value * 2 from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT float multiple of integer value", + command: `SELECT value * 2.0 from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with the count aggregate function +func TestServer_Query_Count(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) + + hour_ago := now.Add(-time.Hour).UTC() + + test.addQueries([]*Query{ + &Query{ + name: "selecting count(value) should succeed", + command: `SELECT count(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "selecting count(value) with where time should return result", + command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting count(*) should error", + command: `SELECT count(*) FROM db0.rp0.cpu`, + exp: `{"error":"error parsing query: expected field argument in count()"}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with Now(). +func TestServer_Query_Now(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) + + test.addQueries([]*Query{ + &Query{ + name: "where with time < now() should work", + command: `SELECT * FROM db0.rp0.cpu where time < now()`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "where with time < now() and GROUP BY * should work", + command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "where with time > now() should return an empty result", + command: `SELECT * FROM db0.rp0.cpu where time > now()`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "where with time > now() with GROUP BY * should return an empty result", + command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, + exp: `{"results":[{}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with epoch precisions. +func TestServer_Query_EpochPrecision(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) + + test.addQueries([]*Query{ + &Query{ + name: "nanosecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"n"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), + }, + &Query{ + name: "microsecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"u"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), + }, + &Query{ + name: "millisecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"ms"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), + }, + &Query{ + name: "second precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"s"}}, + exp: 
fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), + }, + &Query{ + name: "minute precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"m"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), + }, + &Query{ + name: "hour precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"h"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server works with tag queries. 
+func TestServer_Query_Tags(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), + fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), + + fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), + fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + + fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + + fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "tag without field should return error", + command: `SELECT host FROM db0.rp0.cpu`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + &Query{ + name: "field with tag should succeed", + command: `SELECT host, value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with tag 
and GROUP BY should succeed", + command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with two tags should succeed", + command: `SELECT host, value, core FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with two tags and GROUP BY should succeed", + command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value","core"],"values":[["%s",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value","core"],"values":[["%s",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "select * with tags should succeed", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "select * with tags with GROUP BY * should succeed", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: 
"group by tag", + command: `SELECT value FROM db0.rp0.cpu GROUP by host`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "single field (EQ tag value1)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (2 EQ tags)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR different tags)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR with non-existent tag value)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR with all tag values)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (1 EQ and 1 NEQ tag)", + command: `SELECT value FROM db0.rp0.cpu1 
WHERE host = 'server01' AND region != 'us-west'`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "single field (EQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 AND NEQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 OR NEQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point. 
+ exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "single field (NEQ tag value1, point without any tags)", + command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1, point without any tags)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server correctly queries with an alias. 
+func TestServer_Query_Alias(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "baseline query - SELECT * FROM db0.rp0.cpu", + command: `SELECT * FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, + }, + &Query{ + name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", + command: `SELECT steps, value as v FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, + }, + &Query{ + name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", + command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, + }, + &Query{ + name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", + command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, + }, + &Query{ + name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", + command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, + }, + &Query{ + name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", + command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, + }, + &Query{ + name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", + command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, + }, + &Query{ + name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", + command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server will succeed and error for common scenarios. 
+func TestServer_Query_Common(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10)) + + test.addQueries([]*Query{ + &Query{ + name: "selecting a from a non-existent database should error", + command: `SELECT value FROM db1.rp0.cpu`, + exp: `{"results":[{"error":"database not found: db1"}]}`, + }, + &Query{ + name: "selecting a from a non-existent retention policy should error", + command: `SELECT value FROM db0.rp1.cpu`, + exp: `{"results":[{"error":"retention policy not found"}]}`, + }, + &Query{ + name: "selecting a valid measurement and field should succeed", + command: `SELECT value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "explicitly selecting time and a valid measurement and field should succeed", + command: `SELECT time,value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting a measurement that doesn't exist should result in empty set", + command: `SELECT value FROM db0.rp0.idontexist`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting a field that doesn't exist should result in empty set", + command: `SELECT idontexist FROM db0.rp0.cpu`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting wildcard without specifying a database should error", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"error":"database name required"}]}`, + }, + &Query{ + name: "selecting explicit field without specifying a database should error", + command: `SELECT value 
FROM cpu`, + exp: `{"results":[{"error":"database name required"}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query two points. +func TestServer_Query_SelectTwoPoints(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) + + test.addQueries( + &Query{ + name: "selecting two points should result in two points", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting two points with GROUP BY * should result in two points", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + ) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query two negative 
points. +func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) + + test.addQueries(&Query{ + name: "selecting two negative points should succeed", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with relative time. 
+func TestServer_Query_SelectRelativeTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + yesterday := yesterday() + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10)) + + test.addQueries([]*Query{ + &Query{ + name: "single point with time pre-calculated for past time queries yesterday", + command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "single point with time pre-calculated for relative time queries now", + command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various simple calculus queries. 
+func TestServer_Query_SelectRawCalculus(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000") + + test.addQueries([]*Query{ + &Query{ + name: "calculate single derivate", + command: `SELECT derivative(value) from db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// mergeMany ensures that when merging many series together and some of them have a different number +// of points than others in a group by interval the results are correct +func TestServer_Query_MergeMany(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 11; i++ { + for j := 1; j < 5+i%3; j++ { + data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) + writes = append(writes, data) + } + } + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "GROUP by time", + command: `SELECT count(value) FROM db0.rp0.cpu 
WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, + }, + &Query{ + skip: true, + name: "GROUP by tag - FIXME issue #2875", + command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "GROUP by field", + command: `SELECT count(value) FROM db0.rp0.cpu group by value`, + exp: `{"results":[{"error":"can not use field in GROUP BY clause: value"}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_SLimitAndSOffset(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 10; i++ { + data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) + writes = append(writes, data) + } + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "SLIMIT 2 SOFFSET 1", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 2 SOFFSET 3", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 3 SOFFSET 8", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 
SOFFSET 8`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Regex(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "default db and rp", + command: `SELECT * FROM /cpu[13]/`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, + }, + &Query{ + name: "default db and rp with GROUP BY *", + command: `SELECT * FROM /cpu[13]/ GROUP BY *`, + params: url.Values{"db": []string{"db0"}}, + exp: 
`{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "specifying db and rp", + command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "default db and specified rp", + command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "specified db and default rp", + command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + + fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + + fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, 
"2000-01-01T00:01:10Z").UnixNano()), + + fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + + fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + + fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + + fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + + fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, 
"2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + + fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + + fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + + fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + // int64 + &Query{ + name: "stddev with just one point - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM int`, + exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + &Query{ + name: "large mean and stddev - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, + exp: 
`{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, + }, + &Query{ + name: "mean and stddev - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, + }, + &Query{ + name: "first - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "last - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`, + }, + &Query{ + name: "spread - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SPREAD(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "median - even count - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, + }, + &Query{ + name: "median - odd count - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "distinct as call - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(value) FROM intmany`, + exp: 
`{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + }, + &Query{ + name: "distinct alt syntax - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT value FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + }, + &Query{ + name: "distinct select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(host) FROM intmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + &Query{ + name: "distinct alt select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT host FROM intmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + &Query{ + name: "count distinct - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct as call - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + }, + &Query{ + name: "count distinct as call select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, 
+ }, + &Query{ + name: "aggregation with no interval - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, + exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "sum - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, + }, + &Query{ + name: "aggregation with a null field value - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, + }, + &Query{ + skip: true, + name: "multiple aggregations with division - int FIXME issue #2879", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, + exp: 
`{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, + }, + + // float64 + &Query{ + name: "stddev with just one point - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM floatsingle`, + exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + &Query{ + name: "large mean and stddev - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM floatmax`, + exp: `{"results":[{"series":[{"name":"floatmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxFloat64() + `,0]]}]}]}`, + }, + &Query{ + name: "mean and stddev - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, + }, + &Query{ + name: "first - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "last - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`, + }, + &Query{ + name: "spread - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SPREAD(value) FROM floatmany`, + exp: 
`{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "median - even count - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, + }, + &Query{ + name: "median - odd count - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "distinct as call - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + }, + &Query{ + name: "distinct alt syntax - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT value FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, + }, + &Query{ + name: "distinct select tag - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(host) FROM floatmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + &Query{ + name: "distinct alt select tag - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT host FROM floatmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + &Query{ + name: "count distinct - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT value) FROM floatmany`, + exp: 
`{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct as call - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct select tag - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + }, + &Query{ + name: "count distinct as call select tag - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + }, + &Query{ + name: "aggregation with no interval - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, + exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "sum - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, + }, + &Query{ + name: "aggregation with a null field value - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, + exp: 
`{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations with division - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + + // strings + &Query{ + name: "STDDEV on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + &Query{ + name: "MEAN on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + }, + &Query{ + name: "MEDIAN on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM stringdata`, + exp: 
`{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + &Query{ + name: "COUNT on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "FIRST on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["1970-01-01T00:00:00Z","first"]]}]}]}`, + }, + &Query{ + name: "LAST on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["1970-01-01T00:00:00Z","last"]]}]}]}`, + }, + + // general queries + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM load GROUP BY region, host`, + exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "aggregation with WHERE and AND", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, + }, + + // Mathematics + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT 
sum(value)*2 FROM load`, + exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, + }, + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value)/2 FROM load`, + exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, + }, + + // order by time desc + &Query{ + name: "aggregate order by time desc", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_AggregatesTopInt(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + // cpu data with overlapping duplicate values + // hour 0 + fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + 
fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + // hour 1 + fmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()), + fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()), + // hour 2 + fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()), + + // memory data + // hour 0 + fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + // hour 1 + fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + // hour 2 + fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = 
strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "top - cpu", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 1) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 2 values", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values - sorts on tie properly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - with tag", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu limit 2`, + exp: `{"error":"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}`, + }, + &Query{ + name: "top - cpu - hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - time specified - hourly", + params: url.Values{"db": []string{"db0"}}, 
+ command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - time specified (not first) - hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 1), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 2 values hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "top - cpu - time specified - 2 values hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and 
time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "top - cpu - time specified - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - memory - 2 values, two tags", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2), host, service FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2, service tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2), service FROM memory`, + exp: 
`{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - service tag with limit 2, host tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, service, 2), host FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`, + }, + &Query{ + name: "top - memory - host and service tag with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, service, 2) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2 with service tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2), service FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - host and service tag with limit 3", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, service, 3) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + + // TODO + // - Test that specifying fields or tags in the function will rewrite the query to expand them to the fields + // - Test that a field can be used in the top function + // - Test that asking for a field will come back before a tag 
if they have the same name for a tag and a field + // - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value + // - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values + + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Test various aggregates when different series only have data for the same timestamp. +func TestServer_Query_AggregatesIdenticalTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + 
fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "last from multiple series with identical timestamp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT last(value) FROM "series"`, + exp: `{"results":[{"series":[{"name":"series","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + repeat: 100, + }, + &Query{ + name: "first from multiple series with identical timestamp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT first(value) FROM "series"`, + exp: `{"results":[{"series":[{"name":"series","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + repeat: 100, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + for n := 0; n <= query.repeat; n++ { + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + } +} + +// This will test that when using a group by, that it observes the time you asked for +// but will only put the values in the bucket that match the time range +func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + 
fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()), + fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()), + fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()), + fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "sum all time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing first point", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing first points (null for bucket)", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing last point - 2 time intervals", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing last 2 points - 2 time intervals", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Write_Precision(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []struct { + write string + params url.Values + }{ + { + write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), + }, + { + write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), + params: url.Values{"precision": []string{"n"}}, + }, + { + write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)), + params: url.Values{"precision": []string{"u"}}, + }, + { + write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)), + params: url.Values{"precision": []string{"ms"}}, + }, + { + write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)), + params: url.Values{"precision": []string{"s"}}, + }, + { + write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)), + params: 
url.Values{"precision": []string{"m"}}, + }, + { + write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)), + params: url.Values{"precision": []string{"h"}}, + }, + } + + test := NewTest("db0", "rp0") + + test.addQueries([]*Query{ + &Query{ + name: "point with nanosecond precision time - no precision specified on write", + command: `SELECT * FROM cpu_n0_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`, + }, + &Query{ + name: "point with nanosecond precision time", + command: `SELECT * FROM cpu_n1_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`, + }, + &Query{ + name: "point with microsecond precision time", + command: `SELECT * FROM cpu_u_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`, + }, + &Query{ + name: "point with millisecond precision time", + command: `SELECT * FROM cpu_ms_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`, + }, + &Query{ + name: "point with second precision time", + command: `SELECT * FROM cpu_s_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`, + }, + &Query{ + name: "point with minute precision time", + command: `SELECT * FROM cpu_m_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: 
`{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`, + }, + &Query{ + name: "point with hour precision time", + command: `SELECT * FROM cpu_h_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`, + }, + }...) + + // we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly + for _, w := range writes { + test.write = w.write + test.params = w.params + test.initialized = false + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Wildcards(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + + fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + 
fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + + fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "wildcard", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard with group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY *`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "GROUP BY queries", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(value) FROM wgroup GROUP BY *`, + exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, + }, + &Query{ + name: "GROUP BY queries with time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, + exp: 
`{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, + }, + &Query{ + name: "wildcard and field in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value, * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + }, + &Query{ + name: "field and wildcard in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value, * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + }, + &Query{ + name: "field and wildcard in group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY region, *`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard and field in group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY *, region`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM m1, m2`, + exp: 
`{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements via regex", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM /^m.*/`, + exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements via regex and limit", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM db0../^m.*/ LIMIT 2`, + exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_WildcardExpansion(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + + fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "wildcard", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard`, + exp: 
`{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "no wildcard in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT cpu, host, region, value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "no wildcard in select, preserve column order", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host, cpu, region, value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, + }, + + &Query{ + name: "only tags, no fields", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host, region FROM wildcard`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + }, + + &Query{ + name: "no wildcard with alias", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT cpu as c, host as h, region, value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "duplicate tag and field name, always favor field over tag", + command: 
`SELECT * FROM dupnames`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_AcrossShardsAndFields(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "two results for cpu", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT load FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, + }, + &Query{ + name: "two results for cpu, multi-select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT core,load FROM cpu`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, + }, + &Query{ + name: "two results for cpu, wildcard select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, + }, + &Query{ + name: "one result for core", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT core FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "empty result set from non-existent field", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT foo FROM cpu`, + exp: `{"results":[{}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Where_Fields(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, 
// TestServer_Query_Where_Fields verifies WHERE-clause filtering on field
// values for every supported field type: string, float64, int64 and bool.
func TestServer_Query_Where_Fields(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
	defer s.Close()

	// One database with a single default retention policy.
	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	writes := []string{
		// String fields. NOTE(review): the same point is written twice at the
		// identical timestamp; the expectations below assume the duplicate
		// collapses into a single stored point.
		fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
		fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),

		// Numeric fields: load is float64, core is int64.
		fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()),

		// Boolean field.
		fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()),
		fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		// non type specific
		&Query{
			name:    "missing measurement with group by",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT load from missing group by *`,
			exp:     `{"results":[{}]}`,
		},

		// string
		&Query{
			name:    "single string field",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`,
		},
		&Query{
			name:    "string AND query, all fields in SELECT",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`,
		},
		&Query{
			name:    "string AND query, all fields in SELECT, one in parenthesis",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`,
		},
		&Query{
			// Field names beginning with an underscore must still be queryable.
			name:    "string underscored field",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`,
		},
		&Query{
			name:    "string no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `SELECT alert_id FROM cpu WHERE _cust='acme'`,
			exp:     `{"results":[{}]}`,
		},

		// float64
		&Query{
			name:    "float64 GT no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load > 100`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "float64 GTE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load >= 100`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`,
		},
		&Query{
			name:    "float64 EQ match upper bound",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load = 100`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`,
		},
		&Query{
			name:    "float64 LTE match two",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load <= 100`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`,
		},
		&Query{
			name:    "float64 GT match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load > 99`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`,
		},
		&Query{
			name:    "float64 EQ no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load = 99`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "float64 LT match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load < 99`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`,
		},
		&Query{
			name:    "float64 LT no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load < 80`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "float64 NE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select load from cpu where load != 100`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`,
		},

		// int64
		&Query{
			name:    "int64 GT no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core > 4`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "int64 GTE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core >= 4`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`,
		},
		&Query{
			name:    "int64 EQ match upper bound",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core = 4`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`,
		},
		&Query{
			name:    "int64 LTE match two ",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core <= 4`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`,
		},
		&Query{
			name:    "int64 GT match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core > 3`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`,
		},
		&Query{
			name:    "int64 EQ no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core = 3`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "int64 LT match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core < 3`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`,
		},
		&Query{
			name:    "int64 LT no match",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core < 2`,
			exp:     `{"results":[{}]}`,
		},
		&Query{
			name:    "int64 NE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select core from cpu where core != 4`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`,
		},

		// bool
		&Query{
			name:    "bool EQ match true",
			params:  url.Values{"db": []string{"db0"}},
			command: `select local from clicks where local = true`,
			exp:     `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`,
		},
		&Query{
			name:    "bool EQ match false",
			params:  url.Values{"db": []string{"db0"}},
			command: `select local from clicks where local = false`,
			exp:     `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`,
		},

		&Query{
			name:    "bool NE match one",
			params:  url.Values{"db": []string{"db0"}},
			command: `select local from clicks where local != true`,
			exp:     `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`,
		},
	}...)

	// Write the points once (first iteration), then execute each query and
	// compare its JSON response with the expectation.
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Where_With_Tags(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "tag field and time", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, + }, + &Query{ + name: "where on tag that should be double quoted but isn't", + params: url.Values{"db": 
// TestServer_Query_LimitAndOffset verifies LIMIT/OFFSET semantics on raw
// point queries and on GROUP BY time() aggregate queries, including the
// over-large-LIMIT error path.
func TestServer_Query_LimitAndOffset(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	// Four points one second apart; values 2..5 make the rows easy to track
	// through LIMIT/OFFSET windows.
	writes := []string{
		fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
		fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()),
		fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		&Query{
			name:    "limit on points",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from "limited" LIMIT 2`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`,
		},
		&Query{
			name:    "limit higher than the number of data points",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from "limited" LIMIT 20`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`,
		},
		&Query{
			name:    "limit and offset",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from "limited" LIMIT 2 OFFSET 1`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`,
		},
		&Query{
			name:    "limit + offset equal to total number of points",
			params:  url.Values{"db": []string{"db0"}},
			command: `select foo from "limited" LIMIT 3 OFFSET 3`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`,
		},
		&Query{
			// Offset past the end of the data returns the series header with
			// no values.
			name:    "limit - offset higher than number of points",
			command: `select foo from "limited" LIMIT 2 OFFSET 20`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","foo"]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "limit on points with group by time",
			command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "limit higher than the number of data points with group by time",
			command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "limit and offset with group by time",
			command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "limit + offset equal to the number of points with group by time",
			command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`,
			exp:     `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			// Unlike the raw query above, the aggregate form returns an empty
			// result (no series header) when the offset is past the data.
			name:    "limit - offset higher than number of points with group by time",
			command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			// An unbounded time range with group by time(1s) would generate
			// too many intervals and must be rejected.
			name:    "limit higher than the number of data points should error",
			command: `select mean(foo) from "limited" where time > '2000-01-01T00:00:00Z' group by time(1s), * fill(0) limit 2147483647`,
			exp:     `{"results":[{"error":"too many points in the group by interval. maybe you forgot to specify a where time clause?"}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "limit1 higher than MaxGroupBy but the number of data points is less than MaxGroupBy",
			command: `select mean(foo) from "limited" where time >= '2009-11-10T23:00:02Z' and time < '2009-11-10T23:00:03Z' group by time(1s), * fill(0) limit 2147483647`,
			exp:     `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",0]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	// Write the points once (first iteration), then execute each query and
	// compare its JSON response with the expectation.
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
// TestServer_Query_Fill verifies the FILL() clause on GROUP BY time()
// queries: fill with a constant, fill(previous), fill(none) and the default
// null fill, for both mean() and count() aggregates.
func TestServer_Query_Fill(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	// Points chosen so that the 5s bucket starting at 23:00:10 is empty and
	// must be filled.
	writes := []string{
		fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
		fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()),
		fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		&Query{
			name:    "fill with value",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with value, WHERE all values match condition",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			// When the WHERE clause filters out every point, all buckets get
			// the fill value.
			name:    "fill with value, WHERE no values match condition",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",1],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with previous",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			// fill(none) drops empty buckets entirely instead of emitting null.
			name:    "fill with none, i.e. clear out nulls",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill defaults to null",
			command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with count aggregate defaults to null",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with count aggregate defaults to null, no values match",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 100 group by time(5s)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",null],["2009-11-10T23:00:05Z",null],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",null]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "fill with count aggregate specific value",
			command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(1234)`,
			exp:     `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1234],["2009-11-10T23:00:15Z",1]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	// Write the points once (first iteration), then execute each query and
	// compare its JSON response with the expectation.
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
`{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1234],["2009-11-10T23:00:15Z",1]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Chunk(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested. + expectedValues := make([]string, len(writes)) + for i := 0; i < len(writes); i++ { + writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano()) + expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i) + } + expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[%s]}]}]}`, strings.Join(expectedValues, ",")) + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "SELECT all values, no chunking", + command: `SELECT value FROM cpu`, + exp: expected, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
// TestServer_Query_DropAndRecreateMeasurement verifies DROP MEASUREMENT:
// the dropped measurement and its series disappear, sibling measurements
// and their tags keep working, dropping a missing measurement errors, and
// the dropped measurement can then be re-inserted and queried again.
func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	// Two measurements; "cpu" is dropped mid-test, "memory" must survive.
	writes := []string{
		fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
		fmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		&Query{
			name:    "Drop Measurement, series tags preserved tests",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "show series",
			command: `SHOW SERIES`,
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]},{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "ensure we can query for memory with both tags",
			command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "drop measurement cpu",
			command: `DROP MEASUREMENT cpu`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify measurements",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify series",
			command: `SHOW SERIES`,
			exp:     `{"results":[{"series":[{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify cpu measurement is gone",
			command: `SELECT * FROM cpu`,
			exp:     `{"results":[{}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify selecting from a tag 'host' still works",
			command: `SELECT * FROM memory where host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify selecting from a tag 'region' still works",
			command: `SELECT * FROM memory where region='uswest' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify selecting from a tag 'host' and 'region' still works",
			command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "Drop non-existant measurement",
			command: `DROP MEASUREMENT doesntexist`,
			exp:     `{"results":[{"error":"measurement not found: doesntexist"}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	// Phase 1: run the drop-measurement queries above.
	// Test that re-inserting the measurement works fine.
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}

	// Phase 2: re-write the same points and verify the dropped measurement
	// comes back intact.
	test = NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		&Query{
			name:    "verify measurements after recreation",
			command: `SHOW MEASUREMENTS`,
			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    "verify cpu measurement has been re-inserted",
			command: `SELECT * FROM cpu GROUP BY *`,
			exp:     `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
// TestServer_Query_ShowSeries verifies SHOW SERIES in all its forms: bare,
// FROM a measurement, FROM a regex, with WHERE on a tag (equality, regex
// match and negated regex match), and FROM combined with WHERE.
func TestServer_Query_ShowSeries(t *testing.T) {
	t.Parallel()
	s := OpenServer(NewConfig(), "")
	defer s.Close()

	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
		t.Fatal(err)
	}
	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
		t.Fatal(err)
	}

	// Seven series across three measurements; the first cpu series has no
	// region tag so its region column must come back empty.
	writes := []string{
		fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()),
		fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()),
		fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()),
		fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()),
		fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()),
	}

	test := NewTest("db0", "rp0")
	test.write = strings.Join(writes, "\n")

	test.addQueries([]*Query{
		&Query{
			name:    `show series`,
			command: "SHOW SERIES",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series from measurement`,
			command: "SHOW SERIES FROM cpu",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series from regular expression`,
			command: "SHOW SERIES FROM /[cg]pu/",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series with where tag`,
			command: "SHOW SERIES WHERE region = 'uswest'",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=uswest","server01","uswest"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series where tag matches regular expression`,
			command: "SHOW SERIES WHERE region =~ /ca.*/",
			exp:     `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			// NOTE(review): duplicate test name `show series` (same as the
			// first query) — consider renaming for clearer failure output.
			name:    `show series`,
			command: "SHOW SERIES WHERE host !~ /server0[12]/",
			exp:     `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
		&Query{
			name:    `show series with from and where`,
			command: "SHOW SERIES FROM cpu WHERE region = 'useast'",
			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`,
			params:  url.Values{"db": []string{"db0"}},
		},
	}...)

	// Write the points once (first iteration), then execute each query and
	// compare its JSON response with the expectation.
	for i, query := range test.queries {
		if i == 0 {
			if err := test.init(s); err != nil {
				t.Fatalf("test init failed: %s", err)
			}
		}
		if query.skip {
			t.Logf("SKIP:: %s", query.name)
			continue
		}
		if err := query.Execute(s); err != nil {
			t.Error(query.Error(err))
		} else if !query.success() {
			t.Error(query.failureMessage())
		}
	}
}
&Query{ + name: `show measurements where tag matches regular expression`, + command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements where tag does not match a regular expression`, + command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowTagKeys(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast 
value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: `show tag keys`, + command: "SHOW TAG KEYS", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys from", + command: "SHOW TAG KEYS FROM cpu", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys from regex", + command: "SHOW TAG KEYS FROM /[cg]pu/", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys measurement not found", + command: "SHOW TAG KEYS FROM bad", + exp: `{"results":[{"error":"measurement not found: bad"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag values with key", + command: "SHOW TAG VALUES WITH KEY = host", + exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where`, + command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + 
name: `show tag values with key and where matches regular expression`, + command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, + exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where does not matche regular expression`, + command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, + exp: `{"results":[{"series":[{"name":"regionTagValues","columns":["region"],"values":[["caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key in and where does not matche regular expression`, + command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]},{"name":"regionTagValues","columns":["region"],"values":[["uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and measurement matches regular expression`, + command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, + exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowFieldKeys(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: `show field keys`, + command: `SHOW FIELD KEYS`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"disk","columns":["fieldKey"],"values":[["field8"],["field9"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show field keys from measurement`, + command: `SHOW FIELD KEYS FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show field keys measurement with regex`, + command: `SHOW FIELD KEYS FROM /[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_ContinuousQuery(t *testing.T) { + t.Skip() + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + runTest := func(test *Test, t *testing.T) { + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + 
} else if !query.success() { + t.Error(query.failureMessage()) + } + } + } + + // Start times of CQ intervals. + interval0 := time.Now().Add(-time.Second).Round(time.Second * 5) + interval1 := interval0.Add(-time.Second * 5) + interval2 := interval0.Add(-time.Second * 10) + interval3 := interval0.Add(-time.Second * 15) + + writes := []string{ + // Point too far in the past for CQ to pick up. + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()), + + // Points two intervals ago. + fmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()), + + // Points one interval ago. + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()), + + // Points in the current interval. 
+ fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + test.addQueries([]*Query{ + &Query{ + name: `create another retention policy for CQ to write into`, + command: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create continuous query with backreference", + command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: `create another retention policy for CQ to write into`, + command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create continuous query with backreference and group by time", + command: `CREATE CONTINUOUS QUERY "cq2" ON db0 BEGIN SELECT count(value) INTO "rp2".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: `show continuous queries`, + command: `SHOW CONTINUOUS QUERIES`, + exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp1\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp2\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, + }, + }...) + + // Run first test to create CQs. + runTest(&test, t) + + // Trigger CQs to run. + u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano()) + if _, err := s.HTTPPost(u, nil); err != nil { + t.Fatal(err) + } + + // Wait for CQs to run. 
TODO: fix this ugly hack + time.Sleep(time.Second * 5) + + // Setup tests to check the CQ results. + test2 := NewTest("db0", "rp1") + test2.addQueries([]*Query{ + &Query{ + name: "check results of cq1", + command: `SELECT * FROM "rp1"./[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + // TODO: restore this test once this is fixed: https://github.com/influxdb/influxdb/issues/3968 + &Query{ + skip: true, + name: "check results of cq2", + command: `SELECT * FROM "rp2"./[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","uswest",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","useast",null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server02","useast",null],["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + // Run second test to check CQ results. 
+ runTest(&test2, t) +} + +// Tests that a known CQ query with concurrent writes does not deadlock the server +func TestServer_ContinuousQuery_Deadlock(t *testing.T) { + + // Skip until #3517 & #3522 are merged + t.Skip("Skipping CQ deadlock test") + if testing.Short() { + t.Skip("skipping CQ deadlock test") + } + t.Parallel() + s := OpenServer(NewConfig(), "") + defer func() { + s.Close() + // Nil the server so our deadlock detector goroutine can determine if we completed writes + // without timing out + s.Server = nil + }() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + test.addQueries([]*Query{ + &Query{ + name: "create continuous query", + command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`, + exp: `{"results":[{}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + + // Deadlock detector. If the deadlock is fixed, this test should complete all the writes in ~2.5s seconds (with artifical delays + // added). After 10 seconds, if the server has not been closed then we hit the deadlock bug. + iterations := 0 + go func(s *Server) { + <-time.After(10 * time.Second) + + // If the server is not nil then the test is still running and stuck. We panic to avoid + // having the whole test suite hang indefinitely. + if s.Server != nil { + panic("possible deadlock. 
writes did not complete in time") + } + }(s) + + for { + + // After the second write, if the deadlock exists, we'll get a write timeout and + // all subsequent writes will timeout + if iterations > 5 { + break + } + writes := []string{} + for i := 0; i < 1000; i++ { + writes = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i)) + } + write := strings.Join(writes, "\n") + + if _, err := s.Write(test.db, test.rp, write, test.params); err != nil { + t.Fatal(err) + } + iterations += 1 + time.Sleep(500 * time.Millisecond) + } +} + +func TestServer_Query_EvilIdentifiers(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.write = fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()) + + test.addQueries([]*Query{ + &Query{ + name: `query evil identifiers`, + command: `SELECT "select", "in-bytes" FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_OrderByTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "order on points", + params: url.Values{"db": []string{"db0"}}, + command: `select value from "cpu" ORDER BY time DESC`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "baseline", + params: url.Values{"db": []string{"db0"}}, + command: `select * from cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "select field with periods", + params: url.Values{"db": []string{"db0"}}, + command: `select "foo.bar.baz" from cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "baseline", + params: url.Values{"db": []string{"db0"}}, + command: `select * from foo`, + exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "select field with periods", + params: url.Values{"db": []string{"db0"}}, + command: `select "foo.bar.baz" from foo`, + exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md new file mode 100644 index 000000000..2b6883de7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md @@ -0,0 +1,150 @@ +# Server Integration Tests + +Currently, the file `server_test.go` has integration tests for single node scenarios. +At some point we'll need to add cluster tests, and may add them in a different file, or +rename `server_test.go` to `server_single_node_test.go` or something like that. + +## What is in a test? + +Each test is broken apart effectively into the following areas: + +- Write sample data +- Use cases for table driven test, that include a command (typically a query) and an expected result. + +When each test runs it does the following: + +- init: determines if there are any writes and if so, writes them to the in-memory database +- queries: iterate through each query, executing the command, and comparing the results to the expected result. + +## Idempotent - Allows for parallel tests + +Each test should be `idempotent`, meaining that its data will not be affected by other tests, or use cases within the table tests themselves. +This allows for parallel testing, keeping the test suite total execution time very low. + +### Basic sample test + +```go +// Ensure the server can have a database with multiple measurements. 
+func TestServer_Query_Multiple_Measurements(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + // Make sure we do writes for measurements that will span across shards + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} +``` + +Let's break this down: + +In this test, we first tell it to run in parallel with the `t.Parallel()` call. + +We then open a new server with: + +```go +s := OpenServer(NewConfig(), "") +defer s.Close() +``` + +If needed, we create a database and default retention policy. This is usually needed +when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc. 
+ +```go +if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) +} +``` + +Next, set up the write data you need: + +```go +writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), +} +``` +Create a new test with the database and retention policy: + +```go +test := NewTest("db0", "rp0") +``` + +Send in the writes: +```go +test.write = strings.Join(writes, "\n") +``` + +Add some queries (the second one is mocked out to show how to add more than one): + +```go +test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "another test here...", + command: `Some query command`, + exp: `the expected results`, + }, +}...) +``` + +The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper +to make sure the test failure reports the proper lines for debugging purposes. + +#### Running the tests + +To run the tests: + +```sh +go test ./cmd/influxd/run -parallel 500 -timeout 10s +``` + +#### Running a specific test + +```sh +go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill +``` + +#### Verbose feedback + +By default, all logs are silenced when testing. 
If you pass in the `-v` flag, the test suite becomes verbose, and enables all logging in the system + +```sh +go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v +``` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/inspect/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/inspect/main.go new file mode 100644 index 000000000..43b233f54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/inspect/main.go @@ -0,0 +1,142 @@ +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "text/tabwriter" + + "github.com/influxdb/influxdb/tsdb" + _ "github.com/influxdb/influxdb/tsdb/engine" +) + +func main() { + + var path string + flag.StringVar(&path, "p", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") + flag.Parse() + + tstore := tsdb.NewStore(filepath.Join(path, "data")) + tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags) + tstore.EngineOptions.Config.Dir = filepath.Join(path, "data") + tstore.EngineOptions.Config.WALLoggingEnabled = false + tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal") + if err := tstore.Open(); err != nil { + fmt.Printf("Failed to open dir: %v\n", err) + os.Exit(1) + } + + size, err := tstore.DiskSize() + if err != nil { + fmt.Printf("Failed to determine disk usage: %v\n", err) + } + + // Summary stats + fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n", + tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore)) + fmt.Println() + + tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0) + + fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t")) + + shardIDs := tstore.ShardIDs() + + databases := tstore.Databases() + sort.Strings(databases) + + for _, db := range databases { + index := tstore.DatabaseIndex(db) 
+ measurements := index.Measurements() + sort.Sort(measurements) + for _, m := range measurements { + tags := m.TagKeys() + tagValues := 0 + for _, tag := range tags { + tagValues += len(m.TagValues(tag)) + } + fields := m.FieldNames() + sort.Strings(fields) + series := m.SeriesKeys() + sort.Strings(series) + sort.Sort(ShardIDs(shardIDs)) + + // Sample a point from each measurement to determine the field types + for _, shardID := range shardIDs { + shard := tstore.Shard(shardID) + tx, err := shard.ReadOnlyTx() + if err != nil { + fmt.Printf("Failed to get transaction: %v", err) + } + + for _, key := range series { + fieldSummary := []string{} + + cursor := tx.Cursor(key, tsdb.Forward) + + // Series doesn't exist in this shard + if cursor == nil { + continue + } + + // Seek to the beginning + _, value := cursor.Seek([]byte{}) + codec := shard.FieldCodec(m.Name) + if codec != nil { + fields, err := codec.DecodeFieldsWithNames(value) + if err != nil { + fmt.Printf("Failed to decode values: %v", err) + } + + for field, value := range fields { + fieldSummary = append(fieldSummary, fmt.Sprintf("%s:%T", field, value)) + } + sort.Strings(fieldSummary) + } + fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, + len(fields), strings.Join(fieldSummary, ","), len(series)) + break + } + tx.Rollback() + } + } + } + tw.Flush() +} + +func countSeries(tstore *tsdb.Store) int { + var count int + for _, shardID := range tstore.ShardIDs() { + shard := tstore.Shard(shardID) + cnt, err := shard.SeriesCount() + if err != nil { + fmt.Printf("series count failed: %v\n", err) + continue + } + count += cnt + } + return count +} + +func btou64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +// u64tob converts a uint64 into an 8-byte slice. 
+func u64tob(v uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b +} + +type ShardIDs []uint64 + +func (a ShardIDs) Len() int { return len(a) } +func (a ShardIDs) Less(i, j int) bool { return a[i] < a[j] } +func (a ShardIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go new file mode 100644 index 000000000..c18f2c449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go @@ -0,0 +1,78 @@ +package influxdb + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "strings" +) + +var ( + // ErrFieldsRequired is returned when a point does not any fields. + ErrFieldsRequired = errors.New("fields required") + + // ErrFieldTypeConflict is returned when a new field already exists with a different type. + ErrFieldTypeConflict = errors.New("field type conflict") +) + +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +func ErrMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) } + +func Errorf(format string, a ...interface{}) (err error) { + if _, file, line, ok := runtime.Caller(2); ok { + a = append(a, file, line) + err = fmt.Errorf(format+" (%s:%d)", a...) + } else { + err = fmt.Errorf(format, a...) + } + return +} + +// IsClientError indicates whether an error is a known client error. +func IsClientError(err error) bool { + if err == nil { + return false + } + + if err == ErrFieldsRequired { + return true + } + if err == ErrFieldTypeConflict { + return true + } + + if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} + +// mustMarshal encodes a value to JSON. +// This will panic if an error occurs. This should only be used internally when +// an invalid marshal will cause corruption and a panic is appropriate. 
+func mustMarshalJSON(v interface{}) []byte { + b, err := json.Marshal(v) + if err != nil { + panic("marshal: " + err.Error()) + } + return b +} + +// mustUnmarshalJSON decodes a value from JSON. +// This will panic if an error occurs. This should only be used internally when +// an invalid unmarshal will cause corruption and a panic is appropriate. +func mustUnmarshalJSON(b []byte, v interface{}) { + if err := json.Unmarshal(b, v); err != nil { + panic("unmarshal: " + err.Error()) + } +} + +// assert will panic with a given formatted message if the given condition is false. +func assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assert failed: "+msg, v...)) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc new file mode 100644 index 000000000..a9c1a9ca3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc @@ -0,0 +1 @@ +rvm use ruby-2.1.0@burn-in --create diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile new file mode 100644 index 000000000..b1816e8b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +gem "colorize" +gem "influxdb" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock new file mode 100644 index 000000000..9e721c3a7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock @@ -0,0 +1,14 @@ +GEM + remote: https://rubygems.org/ + specs: + colorize (0.6.0) + influxdb (0.0.16) + json + json (1.8.1) + +PLATFORMS + ruby + +DEPENDENCIES + colorize + influxdb diff --git 
a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb new file mode 100644 index 000000000..1d44bc2c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb @@ -0,0 +1,79 @@ +require "influxdb" +require "colorize" +require "benchmark" + +require_relative "log" +require_relative "random_gaussian" + +BATCH_SIZE = 10_000 + +Log.info "Starting burn-in suite" +master = InfluxDB::Client.new +master.delete_database("burn-in") rescue nil +master.create_database("burn-in") +master.create_database_user("burn-in", "user", "pass") + +master.database = "burn-in" +# master.query "select * from test1 into test2;" +# master.query "select count(value) from test1 group by time(1m) into test2;" + +influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" + +Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" + +Log.log "Creating RandomGaussian(500, 25)" +gaussian = RandomGaussian.new(500, 25) +point_count = 0 + +while true + Log.log "Generating 10,000 points.." + points = [] + BATCH_SIZE.times do |n| + points << {value: gaussian.rand.to_i.abs} + end + point_count += points.length + + Log.info "Sending points to server.." + begin + st = Time.now + foo = influxdb.write_point("test1", points) + et = Time.now + Log.log foo.inspect + Log.log "#{et-st} seconds elapsed" + Log.success "Write successful." 
+ rescue => e + Log.failure "Write failed:" + Log.log e + end + sleep 0.5 + + Log.info "Checking regular points" + st = Time.now + response = influxdb.query("select count(value) from test1;") + et = Time.now + + Log.log "#{et-st} seconds elapsed" + + response_count = response["test1"].first["count"] + if point_count == response_count + Log.success "Point counts match: #{point_count} == #{response_count}" + else + Log.failure "Point counts don't match: #{point_count} != #{response_count}" + end + + # Log.info "Checking continuous query points for test2" + # st = Time.now + # response = influxdb.query("select count(value) from test2;") + # et = Time.now + + # Log.log "#{et-st} seconds elapsed" + + # response_count = response["test2"].first["count"] + # if point_count == response_count + # Log.success "Point counts match: #{point_count} == #{response_count}" + # else + # Log.failure "Point counts don't match: #{point_count} != #{response_count}" + # end +end + + diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb new file mode 100644 index 000000000..0f70d7633 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb @@ -0,0 +1,23 @@ +module Log + def self.info(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:yellow) + end + + def self.success(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:green) + end + + def self.failure(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:red) + end + + def self.log(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s + end +end + + diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb new file mode 100644 index 000000000..51d6c3c04 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb @@ -0,0 +1,31 @@ +class RandomGaussian + def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) + @rand_helper = rand_helper + @mean = mean + @stddev = stddev + @valid = false + @next = 0 + end + + def rand + if @valid then + @valid = false + return @next + else + @valid = true + x, y = self.class.gaussian(@mean, @stddev, @rand_helper) + @next = y + return x + end + end + + private + def self.gaussian(mean, stddev, rand) + theta = 2 * Math::PI * rand.call + rho = Math.sqrt(-2 * Math.log(1 - rand.call)) + scale = stddev * rho + x = mean + scale * Math.cos(theta) + y = mean + scale * Math.sin(theta) + return x, y + end +end diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb new file mode 100644 index 000000000..93bc8314f --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb @@ -0,0 +1,29 @@ +require "influxdb" + +ONE_WEEK_IN_SECONDS = 7*24*60*60 +NUM_POINTS = 10_000 +BATCHES = 100 + +master = InfluxDB::Client.new +master.delete_database("ctx") rescue nil +master.create_database("ctx") + +influxdb = InfluxDB::Client.new "ctx" +influxdb.time_precision = "s" + +names = ["foo", "bar", "baz", "quu", "qux"] + +st = Time.now +BATCHES.times do |m| + points = [] + + puts "Writing #{NUM_POINTS} points, time ##{m}.." 
+ NUM_POINTS.times do |n| + timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS) + points << {value: names.sample, time: timestamp} + end + + influxdb.write_point("ct1", points) +end +puts st +puts Time.now diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml new file mode 100644 index 000000000..3065252b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml @@ -0,0 +1,262 @@ +### Welcome to the InfluxDB configuration file. + +# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com +# The data includes raft id (random 8 bytes), os, arch, version, and metadata. +# We don't track ip addresses of servers reporting. This is only used +# to track the number of instances running and the versions, which +# is very helpful for us. +# Change this option to true to disable reporting. +reporting-disabled = false + +### +### [meta] +### +### Controls the parameters for the Raft consensus group that stores metadata +### about the InfluxDB cluster. +### + +[meta] + dir = "/var/opt/influxdb/meta" + hostname = "localhost" + bind-address = ":8088" + retention-autocreate = true + election-timeout = "1s" + heartbeat-timeout = "1s" + leader-lease-timeout = "500ms" + commit-timeout = "50ms" + +### +### [data] +### +### Controls where the actual shard data for InfluxDB lives and how it is +### flushed from the WAL. "dir" may need to be changed to a suitable place +### for your system, but the WAL settings are an advanced configuration. The +### defaults should work for most systems. +### + +[data] + dir = "/var/opt/influxdb/data" + + # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't + # apply to any new shards created after upgrading to a version > 0.9.3. + max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB. 
+ wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush. + wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed. + + # These are the WAL settings for the storage engine >= 0.9.3 + wal-dir = "/var/opt/influxdb/wal" + wal-enable-logging = true + + # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to + # flush to the index + # wal-ready-series-size = 25600 + + # Flush and compact a partition once this ratio of series are over the ready size + # wal-compaction-threshold = 0.6 + + # Force a flush and compaction if any series in a partition gets above this size in bytes + # wal-max-series-size = 2097152 + + # Force a flush of all series and full compaction if there have been no writes in this + # amount of time. This is useful for ensuring that shards that are cold for writes don't + # keep a bunch of data cached in memory and in the WAL. + # wal-flush-cold-interval = "10m" + + # Force a partition to flush its largest series if it reaches this approximate size in + # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory. + # The more memory you have, the bigger this can be. + # wal-partition-size-threshold = 20971520 + +### +### [cluster] +### +### Controls non-Raft cluster behavior, which generally includes how data is +### shared across shards. +### + +[cluster] + shard-writer-timeout = "5s" # The time within which a shard must respond to write. + write-timeout = "5s" # The time within which a write operation must complete on the cluster. + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + enabled = true + check-interval = "30m" + +### +### Controls the system self-monitoring, statistics and diagnostics. +### +### The retention policy for this data is the default retention policy within +### the internal database. 
The internal database is created automatically if +### if it does not already exist, as is the default retention policy. If you +### want to use a non-default retention policy, it must be explicitly created. + +[monitor] + store-enabled = true # Whether to record statistics internally. + store-database = "_internal" # The destination database for recorded statistics + store-interval = "10s" # The interval at which to record statistics + +### +### [admin] +### +### Controls the availability of the built-in, web-based admin interface. If HTTPS is +### enabled for the admin interface, HTTPS must also be enabled on the [http] service. +### + +[admin] + enabled = true + bind-address = ":8083" + https-enabled = false + https-certificate = "/etc/ssl/influxdb.pem" + +### +### [http] +### +### Controls how the HTTP endpoints are configured. These are the primary +### mechanism for getting data into and out of InfluxDB. +### + +[http] + enabled = true + bind-address = ":8086" + auth-enabled = false + log-enabled = true + write-tracing = false + pprof-enabled = false + https-enabled = false + https-certificate = "/etc/ssl/influxdb.pem" + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. +### + +[[graphite]] + enabled = false + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + # name-separator = "." + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + + ## "name-schema" configures tag names for parsing the metric name from graphite protocol; + ## separated by `name-separator`. 
+ ## The "measurement" tag is special and the corresponding field will become + ## the name of the metric. + ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as + ## { + ## measurement: "cpu", + ## tags: { + ## "type": "server", + ## "host": "localhost, + ## "device": "cpu0" + ## } + ## } + # name-schema = "type.host.measurement.device" + + ## If set to true, when the input metric name has more fields than `name-schema` specified, + ## the extra fields will be ignored. + ## Otherwise an error will be logged and the metric rejected. + # ignore-unnamed = true + +### +### [collectd] +### +### Controls the listener for collectd data. +### + +[collectd] + enabled = false + # bind-address = "" + # database = "" + # typesdb = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + +### +### [opentsdb] +### +### Controls the listener for OpenTSDB data. +### + +[opentsdb] + enabled = false + # bind-address = "" + # database = "" + # retention-policy = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. 
+### + +[[udp]] + enabled = false + # bind-address = "" + # database = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + +### +### [continuous_queries] +### +### Controls how continuous queries are run within InfluxDB. +### + +[continuous_queries] + log-enabled = true + enabled = true + recompute-previous-n = 2 + recompute-no-older-than = "10m" + compute-runs-per-interval = 10 + compute-no-more-than = "2m" + +### +### [hinted-handoff] +### +### Controls the hinted handoff feature, which allows nodes to temporarily +### store queued data when one node of a cluster is down for a short period +### of time. +### + +[hinted-handoff] + enabled = true + dir = "/var/opt/influxdb/hh" + max-size = 1073741824 + max-age = "168h" + retry-rate-limit = 0 + retry-interval = "1s" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md new file mode 100644 index 000000000..b8cd9ad8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md @@ -0,0 +1,193 @@ +# Import/Export + +## Exporting from 0.8.9 + +Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later. + +Note that `0.8.9` can be found here: + +``` +http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb +http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm +``` + +### Design + +`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. 
You can choose to export them independently (see below). + +The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdb/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://influxdb.com/docs/v0.9/guides/writing_data.html) in `0.9`. Remember that batching is important and we don't recommend batch sizes over 5k. + +You need to specify a database and shard group when you export. + +To list out your shards, use the following http endpoint: + +`/cluster/shard_spaces` + +example: +```sh +http://username:password@localhost:8086/cluster/shard_spaces +``` + +Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command: + +```sh +curl -o export http://username:password@localhost:8086/export/metrics/default +``` + +Compression is supported, and will result in a significantly smaller file size.
+ +Use the following command for compression: +```sh +curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default +``` + +You can also export just the `DDL` with this option: + +```sh +curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl +``` + +Or just the `DML` with this option: + +```sh +curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml +``` + +### Assumptions + +- Series name mapping follows these [guidelines](https://influxdb.com/docs/v0.8/advanced_topics/schema_design.html) +- Database name will map directly from `0.8` to `0.9` +- Shard Spaces map to Retention Policies +- Shard Space Duration is ignored, as in `0.9` we determine shard size automatically +- Regex is used to match the correct series names and only exports that data for the database +- Duration becomes the new Retention Policy duration + +- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.9` + +### Upgrade Recommendations + +It's recommended that you upgrade to `0.9.3` first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`. + +It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config: + +```toml +# Configure the http api +[api] +read-timeout = "0s" +``` + +### Exceptions + +If a series can't be exported to tags based on the guidelines mentioned above, +we will insert the entire series name as the measurement name. You can either +allow that to import into the new InfluxDB instance, or you can do your own +data massage on it prior to importing it.
+ +For example, if you have the following series name: + +``` +metric.disk.c.host.server01.single +``` + +It will export exactly that as the measurement name and no tags: + +``` +metric.disk.c.host.server01.single +``` + +### Export Metrics + +When you export, you will now get comments inline in the `DML`: + +`# Found 999 Series for export` + +As well as count totals for each series exported: + +`# Series FOO - Points Exported: 999` + +With a total at the bottom: + +`# Points Exported: 999` + +You can grep the file that was exported at the end to get all the export metrics: + +`cat myexport | grep Exported` + +## Importing + +Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`. + +## Caveats + +For the export/import to work, all requisites have to be met. For export, all series names in `0.8` should be in the following format: + +``` +<tag name>.<tag value>.<tag name>.<tag value>.<measurement> +``` +for example: +```sh +az.us-west-1.host.serverA.cpu +``` +or any number of tags +``` +building.2.temperature +``` + +Additionally, the fields need to have a consistent type (all float64, int64, etc) for every write in `0.8`. Otherwise they have the potential to fail writes in the import. +See below for more information. + +## Running the import command + + To import via the cli, you can specify the following command: + + ```sh + influx -import -path=metrics-default.gz -compressed + ``` + + If the file is not compressed you can issue it without the `-compressed` flag: + + ```sh + influx -import -path=metrics-default + ``` + + To redirect failed import lines to another file, run this command: + + ```sh + influx -import -path=metrics-default.gz -compressed > failures + ``` + + The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server. + +### Throttling the import + + If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server.
+ + ```sh + influx -import -path=metrics-default.gz -compressed -pps 50000 > failures + ``` + + Which is stating that you don't want MORE than 50,000 points per second to write to the database. Due to the processing that is taking place however, you will likely never get exactly 50,000 pps, more like 35,000 pps, etc. + +## Understanding the results of the import + +During the import, a status message will write out for every 100,000 points imported and report stats on the progress of the import: + +``` +2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s. Points per second (PPS): 54634 +``` + + The batch will give some basic stats when finished: + + ```sh + 2015/07/29 23:15:20 Processed 2 commands + 2015/07/29 23:15:20 Processed 70207923 inserts + 2015/07/29 23:15:20 Failed 29785000 inserts + ``` + + Most inserts fail due to the following types of error: + + ```sh + 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer + ``` + + This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` the field has to have a consistent type. 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go new file mode 100644 index 000000000..5095868f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go @@ -0,0 +1,236 @@ +package v8 + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "log" + "net/url" + "os" + "strings" + "time" + + "github.com/influxdb/influxdb/client" +) + +const batchSize = 5000 + +// Config is the config used to initialize a Importer importer +type Config struct { + Username string + Password string + URL url.URL + Precision string + WriteConsistency string + Path string + Version string + Compressed bool + PPS int +} + +// NewConfig returns an initialized *Config +func NewConfig() *Config { + return &Config{} +} + +// Importer is the importer used for importing 0.8 data +type Importer struct { + client *client.Client + database string + retentionPolicy string + config *Config + batch []string + totalInserts int + failedInserts int + totalCommands int + throttlePointsWritten int + lastWrite time.Time + throttle *time.Ticker +} + +// NewImporter will return an intialized Importer struct +func NewImporter(config *Config) *Importer { + return &Importer{ + config: config, + batch: make([]string, 0, batchSize), + } +} + +// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize +func (i *Importer) Import() error { + // Create a client and try to connect + config := client.NewConfig() + config.URL = i.config.URL + config.Username = i.config.Username + config.Password = i.config.Password + config.UserAgent = fmt.Sprintf("influxDB importer/%s", i.config.Version) + cl, err := client.NewClient(config) + if err != nil { + return fmt.Errorf("could not create client %s", err) + } + i.client = cl + if _, _, e := i.client.Ping(); e != nil { + return fmt.Errorf("failed to connect to 
%s\n", i.client.Addr()) + } + + // Validate args + if i.config.Path == "" { + return fmt.Errorf("file argument required") + } + + defer func() { + if i.totalInserts > 0 { + log.Printf("Processed %d commands\n", i.totalCommands) + log.Printf("Processed %d inserts\n", i.totalInserts) + log.Printf("Failed %d inserts\n", i.failedInserts) + } + }() + + // Open the file + f, err := os.Open(i.config.Path) + if err != nil { + return err + } + defer f.Close() + + var r io.Reader + + // If gzipped, wrap in a gzip reader + if i.config.Compressed { + gr, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gr.Close() + // Set the reader to the gzip reader + r = gr + } else { + // Standard text file so our reader can just be the file + r = f + } + + // Get our reader + scanner := bufio.NewScanner(r) + + // Process the DDL + i.processDDL(scanner) + + // Set up our throttle channel. Since there is effectively no other activity at this point + // the smaller resolution gets us much closer to the requested PPS + i.throttle = time.NewTicker(time.Microsecond) + defer i.throttle.Stop() + + // Prime the last write + i.lastWrite = time.Now() + + // Process the DML + i.processDML(scanner) + + // Check if we had any errors scanning the file + if err := scanner.Err(); err != nil { + return fmt.Errorf("reading standard input: %s", err) + } + + return nil +} + +func (i *Importer) processDDL(scanner *bufio.Scanner) { + for scanner.Scan() { + line := scanner.Text() + // If we find the DML token, we are done with DDL + if strings.HasPrefix(line, "# DML") { + return + } + if strings.HasPrefix(line, "#") { + continue + } + i.queryExecutor(line) + } +} + +func (i *Importer) processDML(scanner *bufio.Scanner) { + start := time.Now() + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "# CONTEXT-DATABASE:") { + i.database = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") { + i.retentionPolicy = 
strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "#") { + continue + } + i.batchAccumulator(line, start) + } +} + +func (i *Importer) execute(command string) { + response, err := i.client.Query(client.Query{Command: command, Database: i.database}) + if err != nil { + log.Printf("error: %s\n", err) + return + } + if err := response.Error(); err != nil { + log.Printf("error: %s\n", response.Error()) + } +} + +func (i *Importer) queryExecutor(command string) { + i.totalCommands++ + i.execute(command) +} + +func (i *Importer) batchAccumulator(line string, start time.Time) { + i.batch = append(i.batch, line) + if len(i.batch) == batchSize { + if e := i.batchWrite(); e != nil { + log.Println("error writing batch: ", e) + // Output failed lines to STDOUT so users can capture lines that failed to import + fmt.Println(strings.Join(i.batch, "\n")) + i.failedInserts += len(i.batch) + } else { + i.totalInserts += len(i.batch) + } + i.batch = i.batch[:0] + // Give some status feedback every 100000 lines processed + processed := i.totalInserts + i.failedInserts + if processed%100000 == 0 { + since := time.Since(start) + pps := float64(processed) / since.Seconds() + log.Printf("Processed %d lines. Time elapsed: %s. 
Points per second (PPS): %d", processed, since.String(), int64(pps)) + } + } +} + +func (i *Importer) batchWrite() error { + // Accumulate the batch size to see how many points we have written this second + i.throttlePointsWritten += len(i.batch) + + // Find out when we last wrote data + since := time.Since(i.lastWrite) + + // Check to see if we've exceeded our points per second for the current timeframe + var currentPPS int + if since.Seconds() > 0 { + currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds()) + } else { + currentPPS = i.throttlePointsWritten + } + + // If our currentPPS is greater than the PPS specified, then we wait and retry + if int(currentPPS) > i.config.PPS && i.config.PPS != 0 { + // Wait for the next tick + <-i.throttle.C + + // Decrement the batch size back out as it is going to get called again + i.throttlePointsWritten -= len(i.batch) + return i.batchWrite() + } + + _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency) + i.throttlePointsWritten = 0 + i.lastWrite = time.Now() + return e +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md index 3ef272f41..c7ce2aeb4 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md @@ -87,11 +87,11 @@ CREATE CONTINUOUS DATABASE DATABASES DEFAULT DELETE DESC DROP DURATION END EXISTS EXPLAIN FIELD FROM GRANT GROUP IF IN INNER INSERT INTO KEY KEYS LIMIT -SHOW MEASUREMENT MEASUREMENTS OFFSET ON ORDER -PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY -READ REPLICATION RETENTION REVOKE SELECT SERIES -SLIMIT SOFFSET TAG TO USER USERS -VALUES WHERE WITH WRITE +SHOW MEASUREMENT MEASUREMENTS NOT OFFSET ON +ORDER PASSWORD POLICY POLICIES PRIVILEGES QUERIES +QUERY READ REPLICATION RETENTION REVOKE 
SELECT +SERIES SLIMIT SOFFSET TAG TO USER +USERS VALUES WHERE WITH WRITE ``` ## Literals @@ -124,9 +124,7 @@ string_lit = `'` { unicode_char } `'`' . Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal. -``` -Duration unit definitions -------------------------- +### Duration units | Units | Meaning | |--------|-----------------------------------------| | u or µ | microseconds (1 millionth of a second) | @@ -136,7 +134,6 @@ Duration unit definitions | h | hour | | d | day | | w | week | -``` ``` duration_lit = int_lit duration_unit . @@ -191,6 +188,7 @@ statement = alter_retention_policy_stmt | show_measurements_stmt | show_retention_policies | show_series_stmt | + show_shards_stmt | show_tag_keys_stmt | show_tag_values_stmt | show_users_stmt | @@ -455,7 +453,7 @@ SHOW FIELD KEYS FROM cpu; ### SHOW MEASUREMENTS -show_measurements_stmt = [ where_clause ] [ group_by_clause ] [ limit_clause ] +show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . ```sql @@ -482,7 +480,7 @@ SHOW RETENTION POLICIES ON mydb; ### SHOW SERIES ``` -show_series_stmt = [ from_clause ] [ where_clause ] [ group_by_clause ] +show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . ``` @@ -492,10 +490,22 @@ show_series_stmt = [ from_clause ] [ where_clause ] [ group_by_clause ] ``` +### SHOW SHARDS + +``` +show_shards_stmt = "SHOW SHARDS" . +``` + +#### Example: + +```sql +SHOW SHARDS; +``` + ### SHOW TAG KEYS ``` -show_tag_keys_stmt = [ from_clause ] [ where_clause ] [ group_by_clause ] +show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . 
``` @@ -518,7 +528,7 @@ SHOW TAG KEYS WHERE host = 'serverA'; ### SHOW TAG VALUES ``` -show_tag_values_stmt = [ from_clause ] with_tag_clause [ where_clause ] +show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . ``` @@ -551,7 +561,7 @@ SHOW USERS; ### REVOKE ``` -revoke_stmt = privilege [ "ON" db_name ] "FROM" user_name +revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name ``` #### Examples: @@ -567,7 +577,7 @@ REVOKE READ ON mydb FROM jdoe; ### SELECT ``` -select_stmt = fields from_clause [ into_clause ] [ where_clause ] +select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] [ group_by_clause ] [ order_by_clause ] [ limit_clause ] [ offset_clause ] [ slimit_clause ] [ soffset_clause ]. ``` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast.go index 6235e405a..56eb8683e 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/influxdb/influxdb/pkg/slices" ) // DataType represents the primitive data types available in InfluxQL. 
@@ -105,6 +107,7 @@ func (*ShowFieldKeysStatement) node() {} func (*ShowRetentionPoliciesStatement) node() {} func (*ShowMeasurementsStatement) node() {} func (*ShowSeriesStatement) node() {} +func (*ShowShardsStatement) node() {} func (*ShowStatsStatement) node() {} func (*ShowDiagnosticsStatement) node() {} func (*ShowTagKeysStatement) node() {} @@ -206,6 +209,7 @@ func (*ShowFieldKeysStatement) stmt() {} func (*ShowMeasurementsStatement) stmt() {} func (*ShowRetentionPoliciesStatement) stmt() {} func (*ShowSeriesStatement) stmt() {} +func (*ShowShardsStatement) stmt() {} func (*ShowStatsStatement) stmt() {} func (*ShowDiagnosticsStatement) stmt() {} func (*ShowTagKeysStatement) stmt() {} @@ -274,7 +278,7 @@ type SortField struct { // String returns a string representation of a sort field func (field *SortField) String() string { var buf bytes.Buffer - if field.Name == "" { + if field.Name != "" { _, _ = buf.WriteString(field.Name) _, _ = buf.WriteString(" ") } @@ -302,12 +306,19 @@ func (a SortFields) String() string { type CreateDatabaseStatement struct { // Name of the database to be created. Name string + + // IfNotExists indicates whether to return without error if the database + // already exists. + IfNotExists bool } // String returns a string representation of the create database statement. 
func (s *CreateDatabaseStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("CREATE DATABASE ") + if s.IfNotExists { + _, _ = buf.WriteString("IF NOT EXISTS ") + } _, _ = buf.WriteString(s.Name) return buf.String() } @@ -848,6 +859,48 @@ func (s *SelectStatement) RewriteDistinct() { } } +// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement +// while maintaining order of the field names +func (s *SelectStatement) ColumnNames() []string { + // Always set the first column to be time, even if they didn't specify it + columnNames := []string{"time"} + + // First walk each field + for _, field := range s.Fields { + switch f := field.Expr.(type) { + case *Call: + if f.Name == "top" || f.Name == "bottom" { + if len(f.Args) == 2 { + columnNames = append(columnNames, f.Name) + continue + } + // We have a special case now where we have to add the column names for the fields TOP or BOTTOM asked for as well + columnNames = slices.Union(columnNames, f.Fields(), true) + continue + } + columnNames = append(columnNames, field.Name()) + default: + // time is always first, and we already added it, so ignore it if they asked for it anywhere else. + if field.Name() != "time" { + columnNames = append(columnNames, field.Name()) + } + } + } + + return columnNames +} + +// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time +// This is needed to determine re-write behaviors for functions like TOP and BOTTOM +func (s *SelectStatement) HasTimeFieldSpecified() bool { + for _, f := range s.Fields { + if f.Name() == "time" { + return true + } + } + return false +} + // String returns a string representation of the select statement. 
func (s *SelectStatement) String() string { var buf bytes.Buffer @@ -985,6 +1038,14 @@ func (s *SelectStatement) hasTimeDimensions(node Node) bool { } func (s *SelectStatement) validate(tr targetRequirement) error { + if err := s.validateFields(); err != nil { + return err + } + + if err := s.validateDimensions(); err != nil { + return err + } + if err := s.validateDistinct(); err != nil { return err } @@ -1001,47 +1062,144 @@ func (s *SelectStatement) validate(tr targetRequirement) error { return err } - if err := s.validateWildcard(); err != nil { - return err - } + return nil +} +func (s *SelectStatement) validateFields() error { + ns := s.NamesInSelect() + if len(ns) == 1 && ns[0] == "time" { + return fmt.Errorf("at least 1 non-time field must be queried") + } + return nil +} + +func (s *SelectStatement) validateDimensions() error { + var dur time.Duration + for _, dim := range s.Dimensions { + switch expr := dim.Expr.(type) { + case *Call: + // Ensure the call is time() and it only has one duration argument. 
+ // If we already have a duration + if expr.Name != "time" { + return errors.New("only time() calls allowed in dimensions") + } else if len(expr.Args) != 1 { + return errors.New("time dimension expected one argument") + } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { + return errors.New("time dimension must have one duration argument") + } else if dur != 0 { + return errors.New("multiple time dimensions not allowed") + } else { + dur = lit.Val + } + case *VarRef: + if strings.ToLower(expr.Val) == "time" { + return errors.New("time() is a function and expects at least one argument") + } + case *Wildcard: + default: + return errors.New("only time and tag dimensions allowed") + } + } + return nil +} + +// validSelectWithAggregate determines if a SELECT statement has the correct +// combination of aggregate functions combined with selected fields and tags +// Currently we don't have support for all aggregates, but aggregates that +// can be combined with fields/tags are: +// TOP, BOTTOM, MAX, MIN, FIRST, LAST +func (s *SelectStatement) validSelectWithAggregate(numAggregates int) error { + if numAggregates != 0 && numAggregates != len(s.Fields) { + return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") + } return nil } func (s *SelectStatement) validateAggregates(tr targetRequirement) error { - // First, if 1 field is an aggregate, then all fields must be an aggregate. This is - // a explicit limitation of the current system. 
+ // Curently most aggregates can be the ONLY thing in a select statement + // Others, like TOP/BOTTOM can mix aggregates and tags/fields numAggregates := 0 for _, f := range s.Fields { if _, ok := f.Expr.(*Call); ok { numAggregates++ } } - if numAggregates != 0 && numAggregates != len(s.Fields) { - return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") - } - // Secondly, determine if specific calls have at least one and only one argument for _, f := range s.Fields { - if c, ok := f.Expr.(*Call); ok { - switch c.Name { + switch expr := f.Expr.(type) { + case *Call: + switch expr.Name { case "derivative", "non_negative_derivative": - if min, max, got := 1, 2, len(c.Args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", c.Name, min, max, got) + if err := s.validSelectWithAggregate(numAggregates); err != nil { + return err } + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // Validate that if they have a time dimension, they need a sub-call like min/max, etc. 
+ if s.hasTimeDimensions(s.Condition) { + if _, ok := expr.Args[0].(*Call); !ok { + return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) + } + } + case "percentile": - if exp, got := 2, len(c.Args); got != exp { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + if err := s.validSelectWithAggregate(numAggregates); err != nil { + return err + } + if exp, got := 2, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + _, ok := expr.Args[1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected float argument in percentile()") + } + case "top", "bottom": + if exp, got := 2, len(expr.Args); got < exp { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got) + } + if len(expr.Args) > 1 { + callLimit, ok := expr.Args[len(expr.Args)-1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1]) + } + // Check if they asked for a limit smaller than what they passed into the call + if int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 { + return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", int64(callLimit.Val), expr.Name, int64(s.Limit)) + } + + for _, v := range expr.Args[:len(expr.Args)-1] { + if _, ok := v.(*VarRef); !ok { + return fmt.Errorf("only fields or tags are allowed in %s(), found %s", expr.Name, v) + } + } } default: - if exp, got := 1, len(c.Args); got != exp { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + if err := s.validSelectWithAggregate(numAggregates); err != nil { + return err + } + if exp, got := 1, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + switch fc := 
expr.Args[0].(type) { + case *VarRef: + // do nothing + case *Call: + if fc.Name != "distinct" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", expr.Name) } } } } - // Now, check that we have valid duration and where clauses for aggregates + // Check that we have valid duration and where clauses for aggregates // fetch the group by duration groupByDuration, _ := s.GroupByInterval() @@ -1060,13 +1218,6 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { return nil } -func (s *SelectStatement) validateWildcard() error { - if s.HasWildcard() && len(s.Fields) > 1 { - return fmt.Errorf("wildcards can not be combined with other fields") - } - return nil -} - func (s *SelectStatement) HasDistinct() bool { // determine if we have a call named distinct for _, f := range s.Fields { @@ -1489,6 +1640,9 @@ func (t *Target) String() string { var buf bytes.Buffer _, _ = buf.WriteString("INTO ") _, _ = buf.WriteString(t.Measurement.String()) + if t.Measurement.Name == "" { + _, _ = buf.WriteString(":MEASUREMENT") + } return buf.String() } @@ -1819,6 +1973,17 @@ func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} } +// ShowShardsStatement represents a command for displaying shards in the cluster. +type ShowShardsStatement struct{} + +// String returns a string representation. +func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" } + +// RequiredPrivileges returns the privileges required to execute the statement. +func (s *ShowShardsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + // ShowDiagnosticsStatement represents a command for show node diagnostics. 
type ShowDiagnosticsStatement struct{} @@ -2088,37 +2253,21 @@ func (a Dimensions) String() string { // Normalize returns the interval and tag dimensions separately. // Returns 0 if no time interval is specified. -// Returns an error if multiple time dimensions exist or if non-VarRef dimensions are specified. -func (a Dimensions) Normalize() (time.Duration, []string, error) { +func (a Dimensions) Normalize() (time.Duration, []string) { var dur time.Duration var tags []string for _, dim := range a { switch expr := dim.Expr.(type) { case *Call: - // Ensure the call is time() and it only has one duration argument. - // If we already have a duration - if expr.Name != "time" { - return 0, nil, errors.New("only time() calls allowed in dimensions") - } else if len(expr.Args) != 1 { - return 0, nil, errors.New("time dimension expected one argument") - } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { - return 0, nil, errors.New("time dimension must have one duration argument") - } else if dur != 0 { - return 0, nil, errors.New("multiple time dimensions not allowed") - } else { - dur = lit.Val - } - + lit, _ := expr.Args[0].(*DurationLiteral) + dur = lit.Val case *VarRef: tags = append(tags, expr.Val) - - default: - return 0, nil, errors.New("only time and tag dimensions allowed") } } - return dur, tags, nil + return dur, tags } // Dimension represents an expression that a select statement is grouped by. @@ -2147,6 +2296,7 @@ type Measurement struct { RetentionPolicy string Name string Regex *RegexLiteral + IsTarget bool } // String returns a string representation of the measurement. @@ -2205,6 +2355,33 @@ func (c *Call) String() string { return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", ")) } +// Fields will extract any field names from the call. Only specific calls support this. 
+func (c *Call) Fields() []string { + switch c.Name { + case "top", "bottom": + // maintain the order the user specified in the query + keyMap := make(map[string]struct{}) + keys := []string{} + for i, a := range c.Args { + if i == 0 { + // special case, first argument is always the name of the function regardless of the field name + keys = append(keys, c.Name) + continue + } + switch v := a.(type) { + case *VarRef: + if _, ok := keyMap[v.Val]; !ok { + keyMap[v.Val] = struct{}{} + keys = append(keys, v.Val) + } + } + } + return keys + default: + return []string{} + } +} + // Distinct represents a DISTINCT expression. type Distinct struct { // Identifier following DISTINCT diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast_test.go index b58e7353b..ca2836111 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/ast_test.go @@ -451,7 +451,7 @@ func TestSelectStatement_IsRawQuerySet(t *testing.T) { isRaw: false, }, { - stmt: "select mean(*) from foo group by *", + stmt: "select mean(value) from foo group by *", isRaw: false, }, } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go deleted file mode 100644 index 56303d8d9..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/functions_test.go +++ /dev/null @@ -1,534 +0,0 @@ -package influxql - -import ( - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" -) - -import "sort" - -type point struct { - seriesKey string - time int64 - value interface{} -} - -type testIterator struct { - values []point -} - -func (t *testIterator) Next() (timestamp int64, value interface{}) { - if len(t.values) > 0 { - v := t.values[0] - t.values = t.values[1:] - return v.time, v.value - } - 
- return -1, nil -} - -func TestMapMeanNoValues(t *testing.T) { - iter := &testIterator{} - if got := MapMean(iter); got != nil { - t.Errorf("output mismatch: exp nil got %v", got) - } -} - -func TestMapMean(t *testing.T) { - - tests := []struct { - input []point - output *meanMapOutput - }{ - { // Single point - input: []point{point{"0", 1, 1.0}}, - output: &meanMapOutput{1, 1, Float64Type}, - }, - { // Two points - input: []point{ - point{"0", 1, 2.0}, - point{"0", 2, 8.0}, - }, - output: &meanMapOutput{2, 5.0, Float64Type}, - }, - } - - for _, test := range tests { - iter := &testIterator{ - values: test.input, - } - - got := MapMean(iter) - if got == nil { - t.Fatalf("MapMean(%v): output mismatch: exp %v got %v", test.input, test.output, got) - } - - if got.(*meanMapOutput).Count != test.output.Count || got.(*meanMapOutput).Mean != test.output.Mean { - t.Errorf("output mismatch: exp %v got %v", test.output, got) - } - } -} -func TestInitializeMapFuncPercentile(t *testing.T) { - // No args - c := &Call{ - Name: "percentile", - Args: []Expr{}, - } - _, err := InitializeMapFunc(c) - if err == nil { - t.Errorf("InitializeMapFunc(%v) expected error. got nil", c) - } - - if exp := "expected two arguments for percentile()"; err.Error() != exp { - t.Errorf("InitializeMapFunc(%v) mismatch. exp %v got %v", c, exp, err.Error()) - } - - // No percentile arg - c = &Call{ - Name: "percentile", - Args: []Expr{ - &VarRef{Val: "field1"}, - }, - } - - _, err = InitializeMapFunc(c) - if err == nil { - t.Errorf("InitializeMapFunc(%v) expected error. got nil", c) - } - - if exp := "expected two arguments for percentile()"; err.Error() != exp { - t.Errorf("InitializeMapFunc(%v) mismatch. 
exp %v got %v", c, exp, err.Error()) - } -} - -func TestInitializeMapFuncDerivative(t *testing.T) { - - for _, fn := range []string{"derivative", "non_negative_derivative"} { - // No args should fail - c := &Call{ - Name: fn, - Args: []Expr{}, - } - - _, err := InitializeMapFunc(c) - if err == nil { - t.Errorf("InitializeMapFunc(%v) expected error. got nil", c) - } - - // Single field arg should return MapEcho - c = &Call{ - Name: fn, - Args: []Expr{ - &VarRef{Val: " field1"}, - &DurationLiteral{Val: time.Hour}, - }, - } - - _, err = InitializeMapFunc(c) - if err != nil { - t.Errorf("InitializeMapFunc(%v) unexpected error. got %v", c, err) - } - - // Nested Aggregate func should return the map func for the nested aggregate - c = &Call{ - Name: fn, - Args: []Expr{ - &Call{Name: "mean", Args: []Expr{&VarRef{Val: "field1"}}}, - &DurationLiteral{Val: time.Hour}, - }, - } - - _, err = InitializeMapFunc(c) - if err != nil { - t.Errorf("InitializeMapFunc(%v) unexpected error. got %v", c, err) - } - } -} - -func TestInitializeReduceFuncPercentile(t *testing.T) { - // No args - c := &Call{ - Name: "percentile", - Args: []Expr{}, - } - _, err := InitializeReduceFunc(c) - if err == nil { - t.Errorf("InitializedReduceFunc(%v) expected error. got nil", c) - } - - if exp := "expected float argument in percentile()"; err.Error() != exp { - t.Errorf("InitializedReduceFunc(%v) mismatch. exp %v got %v", c, exp, err.Error()) - } - - // No percentile arg - c = &Call{ - Name: "percentile", - Args: []Expr{ - &VarRef{Val: "field1"}, - }, - } - - _, err = InitializeReduceFunc(c) - if err == nil { - t.Errorf("InitializedReduceFunc(%v) expected error. got nil", c) - } - - if exp := "expected float argument in percentile()"; err.Error() != exp { - t.Errorf("InitializedReduceFunc(%v) mismatch. 
exp %v got %v", c, exp, err.Error()) - } -} - -func TestReducePercentileNil(t *testing.T) { - - // ReducePercentile should ignore nil values when calculating the percentile - fn := ReducePercentile(100) - input := []interface{}{ - nil, - } - - got := fn(input) - if got != nil { - t.Fatalf("ReducePercentile(100) returned wrong type. exp nil got %v", got) - } -} - -func TestMapDistinct(t *testing.T) { - const ( // prove that we're ignoring seriesKey - seriesKey1 = "1" - seriesKey2 = "2" - ) - - const ( // prove that we're ignoring time - timeId1 = iota + 1 - timeId2 - timeId3 - timeId4 - timeId5 - timeId6 - ) - - iter := &testIterator{ - values: []point{ - {seriesKey1, timeId1, uint64(1)}, - {seriesKey1, timeId2, uint64(1)}, - {seriesKey1, timeId3, "1"}, - {seriesKey2, timeId4, uint64(1)}, - {seriesKey2, timeId5, float64(1.0)}, - {seriesKey2, timeId6, "1"}, - }, - } - - values := MapDistinct(iter).(distinctValues) - - if exp, got := 3, len(values); exp != got { - t.Errorf("Wrong number of values. exp %v got %v", exp, got) - } - - sort.Sort(values) - - exp := distinctValues{ - uint64(1), - float64(1), - "1", - } - - if !reflect.DeepEqual(values, exp) { - t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values)) - } -} - -func TestMapDistinctNil(t *testing.T) { - iter := &testIterator{ - values: []point{}, - } - - values := MapDistinct(iter) - - if values != nil { - t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values)) - } -} - -func TestReduceDistinct(t *testing.T) { - v1 := distinctValues{ - "2", - "1", - float64(2.0), - float64(1), - uint64(2), - uint64(1), - true, - false, - } - - expect := distinctValues{ - uint64(1), - float64(1), - uint64(2), - float64(2), - false, - true, - "1", - "2", - } - - got := ReduceDistinct([]interface{}{v1, v1, expect}) - - if !reflect.DeepEqual(got, expect) { - t.Errorf("Wrong values. 
exp %v got %v", spew.Sdump(expect), spew.Sdump(got)) - } -} - -func TestReduceDistinctNil(t *testing.T) { - tests := []struct { - name string - values []interface{} - }{ - { - name: "nil values", - values: nil, - }, - { - name: "nil mapper", - values: []interface{}{nil}, - }, - { - name: "no mappers", - values: []interface{}{}, - }, - { - name: "empty mappper (len 1)", - values: []interface{}{distinctValues{}}, - }, - { - name: "empty mappper (len 2)", - values: []interface{}{distinctValues{}, distinctValues{}}, - }, - } - - for _, test := range tests { - t.Log(test.name) - got := ReduceDistinct(test.values) - if got != nil { - t.Errorf("Wrong values. exp nil got %v", spew.Sdump(got)) - } - } -} - -func Test_distinctValues_Sort(t *testing.T) { - values := distinctValues{ - "2", - "1", - float64(2.0), - float64(1), - uint64(2), - uint64(1), - true, - false, - } - - expect := distinctValues{ - uint64(1), - float64(1), - uint64(2), - float64(2), - false, - true, - "1", - "2", - } - - sort.Sort(values) - - if !reflect.DeepEqual(values, expect) { - t.Errorf("Wrong values. exp %v got %v", spew.Sdump(expect), spew.Sdump(values)) - } -} - -func TestMapCountDistinct(t *testing.T) { - const ( // prove that we're ignoring seriesKey - seriesKey1 = "1" - seriesKey2 = "2" - ) - - const ( // prove that we're ignoring time - timeId1 = iota + 1 - timeId2 - timeId3 - timeId4 - timeId5 - timeId6 - timeId7 - ) - - iter := &testIterator{ - values: []point{ - {seriesKey1, timeId1, uint64(1)}, - {seriesKey1, timeId2, uint64(1)}, - {seriesKey1, timeId3, "1"}, - {seriesKey2, timeId4, uint64(1)}, - {seriesKey2, timeId5, float64(1.0)}, - {seriesKey2, timeId6, "1"}, - {seriesKey2, timeId7, true}, - }, - } - - values := MapCountDistinct(iter).(map[interface{}]struct{}) - - if exp, got := 4, len(values); exp != got { - t.Errorf("Wrong number of values. 
exp %v got %v", exp, got) - } - - exp := map[interface{}]struct{}{ - uint64(1): struct{}{}, - float64(1): struct{}{}, - "1": struct{}{}, - true: struct{}{}, - } - - if !reflect.DeepEqual(values, exp) { - t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(values)) - } -} - -func TestMapCountDistinctNil(t *testing.T) { - iter := &testIterator{ - values: []point{}, - } - - values := MapCountDistinct(iter) - - if values != nil { - t.Errorf("Wrong values. exp nil got %v", spew.Sdump(values)) - } -} - -func TestReduceCountDistinct(t *testing.T) { - v1 := map[interface{}]struct{}{ - "2": struct{}{}, - "1": struct{}{}, - float64(2.0): struct{}{}, - float64(1): struct{}{}, - uint64(2): struct{}{}, - uint64(1): struct{}{}, - true: struct{}{}, - false: struct{}{}, - } - - v2 := map[interface{}]struct{}{ - uint64(1): struct{}{}, - float64(1): struct{}{}, - uint64(2): struct{}{}, - float64(2): struct{}{}, - false: struct{}{}, - true: struct{}{}, - "1": struct{}{}, - "2": struct{}{}, - } - - exp := 8 - got := ReduceCountDistinct([]interface{}{v1, v1, v2}) - - if !reflect.DeepEqual(got, exp) { - t.Errorf("Wrong values. exp %v got %v", spew.Sdump(exp), spew.Sdump(got)) - } -} - -func TestReduceCountDistinctNil(t *testing.T) { - emptyResults := make(map[interface{}]struct{}) - tests := []struct { - name string - values []interface{} - }{ - { - name: "nil values", - values: nil, - }, - { - name: "nil mapper", - values: []interface{}{nil}, - }, - { - name: "no mappers", - values: []interface{}{}, - }, - { - name: "empty mappper (len 1)", - values: []interface{}{emptyResults}, - }, - { - name: "empty mappper (len 2)", - values: []interface{}{emptyResults, emptyResults}, - }, - } - - for _, test := range tests { - t.Log(test.name) - got := ReduceCountDistinct(test.values) - if got != 0 { - t.Errorf("Wrong values. 
exp nil got %v", spew.Sdump(got)) - } - } -} - -var getSortedRangeData = []float64{ - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -} - -var getSortedRangeTests = []struct { - name string - data []float64 - start int - count int - expected []float64 -}{ - {"first 5", getSortedRangeData, 0, 5, []float64{0, 1, 2, 3, 4}}, - {"0 length", getSortedRangeData, 8, 0, []float64{}}, - {"past end of data", getSortedRangeData, len(getSortedRangeData) - 3, 5, []float64{67, 68, 69}}, -} - -func TestGetSortedRange(t *testing.T) { - for _, tt := range getSortedRangeTests { - results := getSortedRange(tt.data, tt.start, tt.count) - if len(results) != len(tt.expected) { - t.Errorf("Test %s error. Expected getSortedRange to return %v but got %v", tt.name, tt.expected, results) - } - for i, point := range tt.expected { - if point != results[i] { - t.Errorf("Test %s error. getSortedRange returned wrong result for index %v. 
Expected %v but got %v", tt.name, i, point, results[i]) - } - } - } -} - -var benchGetSortedRangeResults []float64 - -func BenchmarkGetSortedRangeByPivot(b *testing.B) { - data := make([]float64, len(getSortedRangeData)) - var results []float64 - for i := 0; i < b.N; i++ { - copy(data, getSortedRangeData) - results = getSortedRange(data, 8, 15) - } - benchGetSortedRangeResults = results -} - -func BenchmarkGetSortedRangeBySort(b *testing.B) { - data := make([]float64, len(getSortedRangeData)) - var results []float64 - for i := 0; i < b.N; i++ { - copy(data, getSortedRangeData) - sort.Float64s(data) - results = data[8:23] - } - benchGetSortedRangeResults = results -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser.go index b2c51e595..4c55e5e84 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser.go @@ -129,6 +129,8 @@ func (p *Parser) parseShowStatement() (Statement, error) { return nil, newParseError(tokstr(tok, lit), []string{"POLICIES"}, pos) case SERIES: return p.parseShowSeriesStatement() + case SHARDS: + return p.parseShowShardsStatement() case STATS: return p.parseShowStatsStatement() case DIAGNOSTICS: @@ -488,6 +490,9 @@ func (p *Parser) parseSegmentedIdents() ([]string, error) { if ch := p.peekRune(); ch == '/' { // Next segment is a regex so we're done. break + } else if ch == ':' { + // Next segment is context-specific so let caller handle it. + break } else if ch == '.' { // Add an empty identifier. idents = append(idents, "") @@ -799,7 +804,18 @@ func (p *Parser) parseTarget(tr targetRequirement) (*Target, error) { return nil, err } - t := &Target{Measurement: &Measurement{}} + if len(idents) < 3 { + // Check for source measurement reference. 
+ if ch := p.peekRune(); ch == ':' { + if err := p.parseTokens([]Token{COLON, MEASUREMENT}); err != nil { + return nil, err + } + // Append empty measurement name. + idents = append(idents, "") + } + } + + t := &Target{Measurement: &Measurement{IsTarget: true}} switch len(idents) { case 1: @@ -1250,6 +1266,16 @@ func (p *Parser) parseCreateContinuousQueryStatement() (*CreateContinuousQuerySt func (p *Parser) parseCreateDatabaseStatement() (*CreateDatabaseStatement, error) { stmt := &CreateDatabaseStatement{} + // Look for "IF NOT EXISTS" + if tok, _, _ := p.scanIgnoreWhitespace(); tok == IF { + if err := p.parseTokens([]Token{NOT, EXISTS}); err != nil { + return nil, err + } + stmt.IfNotExists = true + } else { + p.unscan() + } + // Parse the name of the database to be created. lit, err := p.parseIdent() if err != nil { @@ -1385,6 +1411,12 @@ func (p *Parser) parseRetentionPolicy() (name string, dfault bool, err error) { return } +// parseShowShardsStatement parses a string for "SHOW SHARDS" statement. +// This function assumes the "SHOW SHARDS" tokens have already been consumed. +func (p *Parser) parseShowShardsStatement() (*ShowShardsStatement, error) { + return &ShowShardsStatement{}, nil +} + // parseShowStatsStatement parses a string and returns a ShowStatsStatement. // This function assumes the "SHOW STATS" tokens have already been consumed. func (p *Parser) parseShowStatsStatement() (*ShowStatsStatement, error) { @@ -1441,13 +1473,6 @@ func (p *Parser) parseDropContinuousQueryStatement() (*DropContinuousQueryStatem func (p *Parser) parseFields() (Fields, error) { var fields Fields - // Check for "*" (i.e., "all fields") - if tok, _, _ := p.scanIgnoreWhitespace(); tok == MUL { - fields = append(fields, &Field{&Wildcard{}, ""}) - return fields, nil - } - p.unscan() - for { // Parse the field. 
f, err := p.parseField() @@ -1777,24 +1802,29 @@ func (p *Parser) parseOrderBy() (SortFields, error) { func (p *Parser) parseSortFields() (SortFields, error) { var fields SortFields - // If first token is ASC or DESC, all fields are sorted. - if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ASC || tok == DESC { - if tok == DESC { - // Token must be ASC, until other sort orders are supported. - return nil, errors.New("only ORDER BY time ASC supported at this time") + tok, pos, lit := p.scanIgnoreWhitespace() + + switch tok { + // The first field after an order by may not have a field name (e.g. ORDER BY ASC) + case ASC, DESC: + fields = append(fields, &SortField{Ascending: (tok == ASC)}) + // If it's a token, parse it as a sort field. At least one is required. + case IDENT: + p.unscan() + field, err := p.parseSortField() + if err != nil { + return nil, err } - return append(fields, &SortField{Ascending: (tok == ASC)}), nil - } else if tok != IDENT { + + if lit != "time" { + return nil, errors.New("only ORDER BY time supported at this time") + } + + fields = append(fields, field) + // Parse error... + default: return nil, newParseError(tokstr(tok, lit), []string{"identifier", "ASC", "DESC"}, pos) } - p.unscan() - - // At least one field is required. - field, err := p.parseSortField() - if err != nil { - return nil, err - } - fields = append(fields, field) // Parse additional fields. for { @@ -1813,9 +1843,8 @@ func (p *Parser) parseSortFields() (SortFields, error) { fields = append(fields, field) } - // First SortField must be time ASC, until other sort orders are supported. 
- if len(fields) > 1 || fields[0].Name != "time" || !fields[0].Ascending { - return nil, errors.New("only ORDER BY time ASC supported at this time") + if len(fields) > 1 { + return nil, errors.New("only ORDER BY time supported at this time") } return fields, nil diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser_test.go index 71ccbffc3..94944398a 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/parser_test.go @@ -73,11 +73,45 @@ func TestParser_ParseStatement(t *testing.T) { Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, }, + { + s: `SELECT * FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.Wildcard{}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, + { + s: `SELECT field1, * FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "field1"}}, + {Expr: &influxql.Wildcard{}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, + { + s: `SELECT *, field1 FROM myseries GROUP BY *`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.Wildcard{}}, + {Expr: &influxql.VarRef{Val: "field1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + Dimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}}, + }, + }, // SELECT statement { - skip: true, - s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY 
time(10h) ORDER BY ASC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)), + s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)), stmt: &influxql.SelectStatement{ IsRawQuery: false, Fields: []*influxql.Field{ @@ -101,12 +135,32 @@ func TestParser_ParseStatement(t *testing.T) { }, Dimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: "time", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}}, SortFields: []*influxql.SortField{ - {Ascending: true}, + {Ascending: false}, }, Limit: 20, Offset: 10, }, }, + { + s: `SELECT "foo.bar.baz" AS foo FROM myseries`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, + }, + }, + { + s: `SELECT "foo.bar.baz" AS foo FROM foo`, + stmt: &influxql.SelectStatement{ + IsRawQuery: true, + Fields: []*influxql.Field{ + {Expr: &influxql.VarRef{Val: "foo.bar.baz"}, Alias: "foo"}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "foo"}}, + }, + }, // derivative { @@ -214,6 +268,65 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + // select percentile statements + { + s: `select percentile("field1", 2.0) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "percentile", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2.0}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + // select top statements + { + s: `select top("field1", 2) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: 
[]influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, 2) from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, 2), tag1 from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.VarRef{Val: "tag1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + + { + s: `select top(field1, tag1, 2), tag1 from cpu`, + stmt: &influxql.SelectStatement{ + IsRawQuery: false, + Fields: []*influxql.Field{ + {Expr: &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.VarRef{Val: "tag1"}, &influxql.NumberLiteral{Val: 2}}}}, + {Expr: &influxql.VarRef{Val: "tag1"}}, + }, + Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, + }, + }, + // select distinct statements { s: `select distinct(field1) from cpu`, @@ -587,17 +700,17 @@ func TestParser_ParseStatement(t *testing.T) { // SHOW SERIES WHERE with ORDER BY and LIMIT { skip: true, - s: `SHOW SERIES WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`, + s: `SHOW SERIES WHERE region = 'order by desc' ORDER BY DESC, field1, field2 DESC LIMIT 10`, stmt: &influxql.ShowSeriesStatement{ Condition: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "region"}, - RHS: &influxql.StringLiteral{Val: "uswest"}, + RHS: &influxql.StringLiteral{Val: "order by desc"}, }, SortFields: []*influxql.SortField{ - {Ascending: true}, - {Name: 
"field1"}, - {Name: "field2"}, + &influxql.SortField{Ascending: false}, + &influxql.SortField{Name: "field1", Ascending: true}, + &influxql.SortField{Name: "field2"}, }, Limit: 10, }, @@ -830,7 +943,7 @@ func TestParser_ParseStatement(t *testing.T) { Database: "testdb", Source: &influxql.SelectStatement{ Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}}, - Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}}, + Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}}, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, Dimensions: []*influxql.Dimension{ { @@ -854,7 +967,7 @@ func TestParser_ParseStatement(t *testing.T) { Source: &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{{Expr: &influxql.Wildcard{}}}, - Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1"}}, + Target: &influxql.Target{Measurement: &influxql.Measurement{Name: "measure1", IsTarget: true}}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu_load_short"}}, }, }, @@ -869,7 +982,7 @@ func TestParser_ParseStatement(t *testing.T) { Source: &influxql.SelectStatement{ Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "count", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}}}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load"}, + Measurement: &influxql.Measurement{RetentionPolicy: "1h.policy1", Name: "cpu.load", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, Dimensions: []*influxql.Dimension{ @@ -896,7 +1009,7 @@ func TestParser_ParseStatement(t *testing.T) { IsRawQuery: true, Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "value"}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "value"}, + Measurement: 
&influxql.Measurement{RetentionPolicy: "policy1", Name: "value", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, @@ -914,18 +1027,52 @@ func TestParser_ParseStatement(t *testing.T) { Fields: []*influxql.Field{{Expr: &influxql.VarRef{Val: "transmit_rx"}}, {Expr: &influxql.VarRef{Val: "transmit_tx"}}}, Target: &influxql.Target{ - Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network"}, + Measurement: &influxql.Measurement{RetentionPolicy: "policy1", Name: "network", IsTarget: true}, }, Sources: []influxql.Source{&influxql.Measurement{Name: "myseries"}}, }, }, }, + // CREATE CONTINUOUS QUERY with backreference measurement name + { + s: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT mean(value) INTO "policy1".:measurement FROM /^[a-z]+.*/ GROUP BY time(1m) END`, + stmt: &influxql.CreateContinuousQueryStatement{ + Name: "myquery", + Database: "testdb", + Source: &influxql.SelectStatement{ + Fields: []*influxql.Field{{Expr: &influxql.Call{Name: "mean", Args: []influxql.Expr{&influxql.VarRef{Val: "value"}}}}}, + Target: &influxql.Target{ + Measurement: &influxql.Measurement{RetentionPolicy: "policy1", IsTarget: true}, + }, + Sources: []influxql.Source{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`^[a-z]+.*`)}}}, + Dimensions: []*influxql.Dimension{ + { + Expr: &influxql.Call{ + Name: "time", + Args: []influxql.Expr{ + &influxql.DurationLiteral{Val: 1 * time.Minute}, + }, + }, + }, + }, + }, + }, + }, + // CREATE DATABASE statement { s: `CREATE DATABASE testdb`, stmt: &influxql.CreateDatabaseStatement{ - Name: "testdb", + Name: "testdb", + IfNotExists: false, + }, + }, + { + s: `CREATE DATABASE IF NOT EXISTS testdb`, + stmt: &influxql.CreateDatabaseStatement{ + Name: "testdb", + IfNotExists: true, }, }, @@ -1197,6 +1344,12 @@ func TestParser_ParseStatement(t *testing.T) { }, }, + // SHOW SHARDS + { + s: `SHOW SHARDS`, + stmt: &influxql.ShowShardsStatement{}, + }, + 
// SHOW DIAGNOSTICS { s: `SHOW DIAGNOSTICS`, @@ -1206,12 +1359,28 @@ func TestParser_ParseStatement(t *testing.T) { // Errors {s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`}, {s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`}, + {s: `SELECT time FROM myseries`, err: `at least 1 non-time field must be queried`}, {s: `blah blah`, err: `found blah, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`}, {s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`}, {s: `SELECT field1 FROM "series" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`}, {s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`}, {s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected number at line 1, char 35`}, {s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `fractional parts not allowed in LIMIT at line 1, char 35`}, + {s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`}, + {s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`}, + {s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, + {s: `SELECT top(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5.000`}, + {s: `SELECT top(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`}, + {s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`}, + {s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, + {s: `SELECT 
bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,host,server,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, + {s: `SELECT bottom(field1,5,server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5.000`}, + {s: `SELECT bottom(field1,max(foo),server,2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`}, + {s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`}, + {s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`}, + {s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`}, {s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected number at line 1, char 36`}, {s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `fractional parts not allowed in OFFSET at line 1, char 36`}, {s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`}, @@ -1219,19 +1388,20 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`}, {s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`}, {s: `SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`}, - {s: `SELECT field1 FROM myseries ORDER BY DESC`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY field1`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY time DESC`, err: `only ORDER BY time ASC supported at this time`}, - {s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time ASC supported at this time`}, + {s: `SELECT field1 FROM 
myseries ORDER BY time, field1`, err: `only ORDER BY time supported at this time`}, {s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`}, {s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`}, {s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT count(value) FROM foo group by time(1s)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, {s: `SELECT count(value) FROM foo group by time(1s) where host = 'hosta.influxdb.org'`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, + {s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`}, + {s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected one argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have one duration argument`}, + {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`}, {s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`}, {s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse number at line 1, char 
8`}, {s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`}, - {s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`}, @@ -1242,15 +1412,18 @@ func TestParser_ParseStatement(t *testing.T) { {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct ) can only have one argument`}, {s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct ) can only have one argument`}, {s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`}, + {s: `SELECT derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, {s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`}, {s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT derivative(value) FROM myseries where time < now() and time > now() - 1d`, err: `aggregate function required inside the call to derivative`}, + {s: `SELECT non_negative_derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, + {s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`}, + {s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of 
arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`}, + {s: `SELECT non_negative_derivative(value) FROM myseries where time < now() and time > now() - 1d`, err: `aggregate function required inside the call to non_negative_derivative`}, {s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`}, {s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, {s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`}, - {s: `SELECT foo, * from cpu`, err: `wildcards can not be combined with other fields`}, - {s: `SELECT *, * from cpu`, err: `found ,, expected FROM at line 1, char 9`}, - {s: `SELECT *, foo from cpu`, err: `found ,, expected FROM at line 1, char 9`}, {s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`}, {s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`}, {s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`}, @@ -1275,6 +1448,10 @@ func TestParser_ParseStatement(t *testing.T) { {s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`}, {s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`}, {s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT at line 1, char 6`}, + {s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`}, + {s: `CREATE DATABASE IF`, err: `found EOF, expected NOT at line 1, char 20`}, + {s: `CREATE DATABASE IF NOT`, err: `found EOF, expected EXISTS at line 1, char 24`}, + {s: `CREATE DATABASE IF NOT EXISTS`, err: `found EOF, expected identifier at line 1, char 31`}, {s: `DROP 
DATABASE`, err: `found EOF, expected identifier at line 1, char 15`}, {s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`}, {s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`}, @@ -1395,7 +1572,8 @@ func TestParser_ParseStatement(t *testing.T) { if !reflect.DeepEqual(tt.err, errstring(err)) { t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err) } else if tt.err == "" && !reflect.DeepEqual(tt.stmt, stmt) { - t.Logf("\nexp=%s\ngot=%s\n", mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) + t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) } } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner.go index c6dab019c..d071c8571 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner.go @@ -95,6 +95,8 @@ func (s *Scanner) Scan() (tok Token, pos Pos, lit string) { return COMMA, pos, "" case ';': return SEMICOLON, pos, "" + case ':': + return COLON, pos, "" } return ILLEGAL, pos, string(ch0) diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner_test.go index 4a0124d53..b365a1b55 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/scanner_test.go @@ -136,8 +136,10 @@ func TestScanner_Scan(t *testing.T) { {s: `KEYS`, tok: influxql.KEYS}, {s: `LIMIT`, tok: influxql.LIMIT}, {s: `SHOW`, tok: influxql.SHOW}, + {s: `SHARDS`, tok: influxql.SHARDS}, {s: `MEASUREMENT`, tok: influxql.MEASUREMENT}, {s: 
`MEASUREMENTS`, tok: influxql.MEASUREMENTS}, + {s: `NOT`, tok: influxql.NOT}, {s: `OFFSET`, tok: influxql.OFFSET}, {s: `ON`, tok: influxql.ON}, {s: `ORDER`, tok: influxql.ORDER}, diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/token.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/token.go index 0eb5b7a9a..795c7b169 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/token.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/token.go @@ -50,6 +50,7 @@ const ( LPAREN // ( RPAREN // ) COMMA // , + COLON // : SEMICOLON // ; DOT // . @@ -91,6 +92,7 @@ const ( LIMIT MEASUREMENT MEASUREMENTS + NOT OFFSET ON ORDER @@ -109,6 +111,7 @@ const ( SERVERS SET SHOW + SHARDS SLIMIT STATS DIAGNOSTICS @@ -159,6 +162,7 @@ var tokens = [...]string{ LPAREN: "(", RPAREN: ")", COMMA: ",", + COLON: ":", SEMICOLON: ";", DOT: ".", @@ -198,6 +202,7 @@ var tokens = [...]string{ LIMIT: "LIMIT", MEASUREMENT: "MEASUREMENT", MEASUREMENTS: "MEASUREMENTS", + NOT: "NOT", OFFSET: "OFFSET", ON: "ON", ORDER: "ORDER", @@ -216,6 +221,7 @@ var tokens = [...]string{ SERVERS: "SERVERS", SET: "SET", SHOW: "SHOW", + SHARDS: "SHARDS", SLIMIT: "SLIMIT", SOFFSET: "SOFFSET", STATS: "STATS", diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxvar.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxvar.go new file mode 100644 index 000000000..58455515a --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxvar.go @@ -0,0 +1,45 @@ +package influxdb + +import ( + "expvar" + "sync" +) + +var expvarMu sync.Mutex + +// NewStatistics returns an expvar-based map with the given key. Within that map +// is another map. Within there "name" is the Measurement name, "tags" are the tags, +// and values are placed at the key "values". +func NewStatistics(key, name string, tags map[string]string) *expvar.Map { + expvarMu.Lock() + defer expvarMu.Unlock() + + // Add expvar for this service. 
+ var v expvar.Var + if v = expvar.Get(key); v == nil { + v = expvar.NewMap(key) + } + m := v.(*expvar.Map) + + // Set the name + nameVar := &expvar.String{} + nameVar.Set(name) + m.Set("name", nameVar) + + // Set the tags + tagsVar := &expvar.Map{} + tagsVar.Init() + for k, v := range tags { + value := &expvar.String{} + value.Set(v) + tagsVar.Set(k, value) + } + m.Set("tags", tagsVar) + + // Create and set the values entry used for actual stats. + statMap := &expvar.Map{} + statMap.Init() + m.Set("values", statMap) + + return statMap +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data.go index 33c829bb8..71c278eeb 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data.go @@ -132,7 +132,7 @@ func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, return &di.RetentionPolicies[i], nil } } - return nil, ErrRetentionPolicyNotFound + return nil, nil } // CreateRetentionPolicy creates a new retention policy on a database. @@ -172,6 +172,11 @@ func (data *Data) DropRetentionPolicy(database, name string) error { return ErrDatabaseNotFound } + // Prohibit dropping the default retention policy. + if di.DefaultRetentionPolicy == name { + return ErrRetentionPolicyDefault + } + // Remove from list. 
for i := range di.RetentionPolicies { if di.RetentionPolicies[i].Name == name { @@ -212,6 +217,7 @@ func (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPol } if rpu.Duration != nil { rpi.Duration = *rpu.Duration + rpi.ShardGroupDuration = shardGroupDuration(rpi.Duration) } if rpu.ReplicaN != nil { rpi.ReplicaN = *rpu.ReplicaN @@ -272,7 +278,6 @@ func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax tim } groups = append(groups, g) } - sort.Sort(ShardGroupInfos(groups)) return groups, nil } @@ -343,13 +348,16 @@ func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) si := &sgi.Shards[i] for j := 0; j < replicaN; j++ { nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID - si.OwnerIDs = append(si.OwnerIDs, nodeID) + si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID}) nodeIndex++ } } - // Retention policy has a new shard group, so update the policy. + // Retention policy has a new shard group, so update the policy. Shard + // Groups must be stored in sorted order, as other parts of the system + // assume this to be the case. rpi.ShardGroups = append(rpi.ShardGroups, sgi) + sort.Sort(ShardGroupInfos(rpi.ShardGroups)) return nil } @@ -661,6 +669,31 @@ func (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo { return nil } +// ShardInfos returns a list of all shards' info for the database. +func (di DatabaseInfo) ShardInfos() []ShardInfo { + shards := map[uint64]*ShardInfo{} + for i := range di.RetentionPolicies { + for j := range di.RetentionPolicies[i].ShardGroups { + sg := di.RetentionPolicies[i].ShardGroups[j] + // Skip deleted shard groups + if sg.Deleted() { + continue + } + for k := range sg.Shards { + si := &di.RetentionPolicies[i].ShardGroups[j].Shards[k] + shards[si.ID] = si + } + } + } + + infos := make([]ShardInfo, 0, len(shards)) + for _, info := range shards { + infos = append(infos, *info) + } + + return infos +} + // clone returns a deep copy of di. 
func (di DatabaseInfo) clone() DatabaseInfo { other := di @@ -916,14 +949,14 @@ func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) { // ShardInfo represents metadata about a shard. type ShardInfo struct { - ID uint64 - OwnerIDs []uint64 + ID uint64 + Owners []ShardOwner } // OwnedBy returns whether the shard's owner IDs includes nodeID. func (si ShardInfo) OwnedBy(nodeID uint64) bool { - for _, id := range si.OwnerIDs { - if id == nodeID { + for _, so := range si.Owners { + if so.NodeID == nodeID { return true } } @@ -934,9 +967,11 @@ func (si ShardInfo) OwnedBy(nodeID uint64) bool { func (si ShardInfo) clone() ShardInfo { other := si - if si.OwnerIDs != nil { - other.OwnerIDs = make([]uint64, len(si.OwnerIDs)) - copy(other.OwnerIDs, si.OwnerIDs) + if si.Owners != nil { + other.Owners = make([]ShardOwner, len(si.Owners)) + for i := range si.Owners { + other.Owners[i] = si.Owners[i].clone() + } } return other @@ -948,17 +983,64 @@ func (si ShardInfo) marshal() *internal.ShardInfo { ID: proto.Uint64(si.ID), } - pb.OwnerIDs = make([]uint64, len(si.OwnerIDs)) - copy(pb.OwnerIDs, si.OwnerIDs) + pb.Owners = make([]*internal.ShardOwner, len(si.Owners)) + for i := range si.Owners { + pb.Owners[i] = si.Owners[i].marshal() + } return pb } +// UnmarshalBinary decodes the object from a binary format. +func (si *ShardInfo) UnmarshalBinary(buf []byte) error { + var pb internal.ShardInfo + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + si.unmarshal(&pb) + return nil +} + // unmarshal deserializes from a protobuf representation. func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) { si.ID = pb.GetID() - si.OwnerIDs = make([]uint64, len(pb.GetOwnerIDs())) - copy(si.OwnerIDs, pb.GetOwnerIDs()) + + // If deprecated "OwnerIDs" exists then convert it to "Owners" format. 
+ if len(pb.GetOwnerIDs()) > 0 { + si.Owners = make([]ShardOwner, len(pb.GetOwnerIDs())) + for i, x := range pb.GetOwnerIDs() { + si.Owners[i].unmarshal(&internal.ShardOwner{ + NodeID: proto.Uint64(x), + }) + } + } else if len(pb.GetOwners()) > 0 { + si.Owners = make([]ShardOwner, len(pb.GetOwners())) + for i, x := range pb.GetOwners() { + si.Owners[i].unmarshal(x) + } + } +} + +// ShardOwner represents a node that owns a shard. +type ShardOwner struct { + NodeID uint64 +} + +// clone returns a deep copy of so. +func (so ShardOwner) clone() ShardOwner { + return so +} + +// marshal serializes to a protobuf representation. +func (so ShardOwner) marshal() *internal.ShardOwner { + return &internal.ShardOwner{ + NodeID: proto.Uint64(so.NodeID), + } +} + +// unmarshal deserializes from a protobuf representation. +func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) { + so.NodeID = pb.GetNodeID() } // ContinuousQueryInfo represents metadata about a continuous query. diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data_test.go index 945d78242..d26eb9467 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/data_test.go @@ -9,8 +9,10 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/gogo/protobuf/proto" "github.com/influxdb/influxdb/influxql" "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/meta/internal" ) // Ensure a node can be created. 
@@ -178,7 +180,7 @@ func TestData_UpdateRetentionPolicy(t *testing.T) { if rpi, _ := data.RetentionPolicy("db0", "rp1"); !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{ Name: "rp1", Duration: 10 * time.Hour, - ShardGroupDuration: 604800000000000, + ShardGroupDuration: 3600000000000, ReplicaN: 3, }) { t.Fatalf("unexpected policy: %#v", rpi) @@ -299,7 +301,13 @@ func TestData_CreateShardGroup(t *testing.T) { StartTime: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), EndTime: time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ - {ID: 1, OwnerIDs: []uint64{1, 2}}, + { + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + }, + }, }, }) { t.Fatalf("unexpected shard group: %#v", sgi) @@ -570,8 +578,12 @@ func TestData_Clone(t *testing.T) { EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ { - ID: 200, - OwnerIDs: []uint64{1, 3, 4}, + ID: 200, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 3}, + {NodeID: 4}, + }, }, }, }, @@ -605,8 +617,8 @@ func TestData_Clone(t *testing.T) { } // Ensure that changing data in the clone does not affect the original. 
- other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1] = 9 - if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].OwnerIDs[1]; v != 3 { + other.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID = 9 + if v := data.Databases[0].RetentionPolicies[0].ShardGroups[0].Shards[0].Owners[1].NodeID; v != 3 { t.Fatalf("editing clone changed original: %v", v) } } @@ -637,8 +649,12 @@ func TestData_MarshalBinary(t *testing.T) { EndTime: time.Date(2000, time.February, 1, 0, 0, 0, 0, time.UTC), Shards: []meta.ShardInfo{ { - ID: 200, - OwnerIDs: []uint64{1, 3, 4}, + ID: 200, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 3}, + {NodeID: 4}, + }, }, }, }, @@ -682,3 +698,33 @@ func TestData_MarshalBinary(t *testing.T) { t.Fatalf("unexpected users: %#v", other.Users) } } + +// Ensure shards with deprecated "OwnerIDs" can be decoded. +func TestShardInfo_UnmarshalBinary_OwnerIDs(t *testing.T) { + // Encode deprecated form to bytes. + buf, err := proto.Marshal(&internal.ShardInfo{ + ID: proto.Uint64(1), + OwnerIDs: []uint64{10, 20, 30}, + }) + if err != nil { + t.Fatal(err) + } + + // Decode deprecated form. + var si meta.ShardInfo + if err := si.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } + + // Verify data is migrated correctly. + if !reflect.DeepEqual(si, meta.ShardInfo{ + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 10}, + {NodeID: 20}, + {NodeID: 30}, + }, + }) { + t.Fatalf("unexpected shard info: %s", spew.Sdump(si)) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/errors.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/errors.go index 44c54f964..5945c727d 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/errors.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/errors.go @@ -43,6 +43,10 @@ var ( // ErrRetentionPolicyExists is returned when creating an already existing policy. 
ErrRetentionPolicyExists = errors.New("retention policy already exists") + // ErrRetentionPolicyDefault is returned when attempting a prohibited operation + // on a default retention policy. + ErrRetentionPolicyDefault = errors.New("retention policy is default") + // ErrRetentionPolicyNotFound is returned when mutating a policy that doesn't exist. ErrRetentionPolicyNotFound = errors.New("retention policy not found") diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.pb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.pb.go index fbd8cc504..4d6752d85 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.pb.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.pb.go @@ -15,6 +15,7 @@ It has these top-level messages: RetentionPolicyInfo ShardGroupInfo ShardInfo + ShardOwner ContinuousQueryInfo UserInfo UserPrivilege @@ -416,9 +417,10 @@ func (m *ShardGroupInfo) GetShards() []*ShardInfo { } type ShardInfo struct { - ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"` - OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"` - XXX_unrecognized []byte `json:"-"` + ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"` + OwnerIDs []uint64 `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"` + Owners []*ShardOwner `protobuf:"bytes,3,rep" json:"Owners,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *ShardInfo) Reset() { *m = ShardInfo{} } @@ -439,6 +441,29 @@ func (m *ShardInfo) GetOwnerIDs() []uint64 { return nil } +func (m *ShardInfo) GetOwners() []*ShardOwner { + if m != nil { + return m.Owners + } + return nil +} + +type ShardOwner struct { + NodeID *uint64 `protobuf:"varint,1,req" json:"NodeID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShardOwner) Reset() { *m = ShardOwner{} } +func (m *ShardOwner) String() string { return proto.CompactTextString(m) } +func (*ShardOwner) ProtoMessage() {} + +func (m 
*ShardOwner) GetNodeID() uint64 { + if m != nil && m.NodeID != nil { + return *m.NodeID + } + return 0 +} + type ContinuousQueryInfo struct { Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"` Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.proto b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.proto index 2aa50244b..311147225 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.proto +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/internal/meta.proto @@ -49,8 +49,13 @@ message ShardGroupInfo { } message ShardInfo { - required uint64 ID = 1; - repeated uint64 OwnerIDs = 2; + required uint64 ID = 1; + repeated uint64 OwnerIDs = 2 [deprecated=true]; + repeated ShardOwner Owners = 3; +} + +message ShardOwner { + required uint64 NodeID = 1; } message ContinuousQueryInfo { diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc_test.go index 3f60c6bd0..40f3540af 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/rpc_test.go @@ -122,7 +122,7 @@ func TestRPCFetchDataMatchesBlocking(t *testing.T) { // Simulate the rmote index changing and unblocking fs.mu.Lock() - fs.md.Index = 100 + fs.md = &Data{Index: 100} fs.mu.Unlock() close(fs.blockChan) wg.Wait() diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/state.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/state.go index a442a8007..c9fc02c4d 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/state.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/state.go @@ -334,8 +334,6 @@ func (r *localRaft) leader() string { } func (r *localRaft) isLeader() bool { - r.store.mu.RLock() - defer r.store.mu.RUnlock() if r.raft == 
nil { return false } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor.go index 08207a1b2..a5e5655c7 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor.go @@ -1,7 +1,10 @@ package meta import ( + "bytes" "fmt" + "strconv" + "time" "github.com/influxdb/influxdb/influxql" ) @@ -80,6 +83,8 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql. return e.executeDropContinuousQueryStatement(stmt) case *influxql.ShowContinuousQueriesStatement: return e.executeShowContinuousQueriesStatement(stmt) + case *influxql.ShowShardsStatement: + return e.executeShowShardsStatement(stmt) case *influxql.ShowStatsStatement: return e.executeShowStatsStatement(stmt) default: @@ -89,6 +94,9 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql. 
func (e *StatementExecutor) executeCreateDatabaseStatement(q *influxql.CreateDatabaseStatement) *influxql.Result { _, err := e.Store.CreateDatabase(q.Name) + if err == ErrDatabaseExists && q.IfNotExists { + err = nil + } return &influxql.Result{Err: err} } @@ -281,6 +289,50 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql return &influxql.Result{Series: rows} } +func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) *influxql.Result { + dis, err := e.Store.Databases() + if err != nil { + return &influxql.Result{Err: err} + } + + rows := []*influxql.Row{} + for _, di := range dis { + row := &influxql.Row{Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return &influxql.Result{Series: rows} +} + func (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) *influxql.Result { return &influxql.Result{Err: fmt.Errorf("SHOW STATS is not implemented yet")} } + +// joinUint64 returns a comma-delimited string of uint64 numbers. 
+func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go index 64894aaea..7ad290084 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go @@ -625,13 +625,13 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery(t *testing.T) t.Fatalf("unexpected database: %s", database) } else if name != "cq0" { t.Fatalf("unexpected name: %s", name) - } else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END` { + } else if query != `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END` { t.Fatalf("unexpected query: %s", query) } return nil } - stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`) + stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`) if res := e.ExecuteStatement(stmt); res.Err != nil { t.Fatal(res.Err) } else if res.Series != nil { @@ -646,7 +646,7 @@ func TestStatementExecutor_ExecuteStatement_CreateContinuousQuery_Err(t *testing return errors.New("marker") } - stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(*) INTO db1 FROM db0 GROUP BY time(1h) END`) + stmt := influxql.MustParseStatement(`CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count(field1) INTO db1 FROM db0 GROUP BY time(1h) END`) if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" { 
t.Fatalf("unexpected error: %s", res.Err) } @@ -693,14 +693,14 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T) { Name: "db0", ContinuousQueries: []meta.ContinuousQueryInfo{ - {Name: "cq0", Query: "SELECT count(*) INTO db1 FROM db0"}, - {Name: "cq1", Query: "SELECT count(*) INTO db2 FROM db0"}, + {Name: "cq0", Query: "SELECT count(field1) INTO db1 FROM db0"}, + {Name: "cq1", Query: "SELECT count(field1) INTO db2 FROM db0"}, }, }, { Name: "db1", ContinuousQueries: []meta.ContinuousQueryInfo{ - {Name: "cq2", Query: "SELECT count(*) INTO db3 FROM db1"}, + {Name: "cq2", Query: "SELECT count(field1) INTO db3 FROM db1"}, }, }, }, nil @@ -714,15 +714,15 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries(t *testing.T) Name: "db0", Columns: []string{"name", "query"}, Values: [][]interface{}{ - {"cq0", "SELECT count(*) INTO db1 FROM db0"}, - {"cq1", "SELECT count(*) INTO db2 FROM db0"}, + {"cq0", "SELECT count(field1) INTO db1 FROM db0"}, + {"cq1", "SELECT count(field1) INTO db2 FROM db0"}, }, }, { Name: "db1", Columns: []string{"name", "query"}, Values: [][]interface{}{ - {"cq2", "SELECT count(*) INTO db3 FROM db1"}, + {"cq2", "SELECT count(field1) INTO db3 FROM db1"}, }, }, }) { @@ -755,7 +755,7 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) { // Execute a SELECT statement. NewStatementExecutor().ExecuteStatement( - influxql.MustParseStatement(`SELECT count(*) FROM db0`), + influxql.MustParseStatement(`SELECT count(field1) FROM db0`), ) }() @@ -765,6 +765,57 @@ func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) { } } +// Ensure a SHOW SHARDS statement can be executed. 
+func TestStatementExecutor_ExecuteStatement_ShowShards(t *testing.T) { + e := NewStatementExecutor() + e.Store.DatabasesFn = func() ([]meta.DatabaseInfo, error) { + return []meta.DatabaseInfo{ + { + Name: "foo", + RetentionPolicies: []meta.RetentionPolicyInfo{ + { + Duration: time.Second, + ShardGroups: []meta.ShardGroupInfo{ + { + StartTime: time.Unix(0, 0), + EndTime: time.Unix(1, 0), + Shards: []meta.ShardInfo{ + { + ID: 1, + Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }, + }, + { + ID: 2, + }, + }, + }, + }, + }, + }, + }, + }, nil + } + + if res := e.ExecuteStatement(influxql.MustParseStatement(`SHOW SHARDS`)); res.Err != nil { + t.Fatal(res.Err) + } else if !reflect.DeepEqual(res.Series, influxql.Rows{ + { + Name: "foo", + Columns: []string{"id", "start_time", "end_time", "expiry_time", "owners"}, + Values: [][]interface{}{ + {uint64(1), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", "1,2,3"}, + {uint64(2), "1970-01-01T00:00:00Z", "1970-01-01T00:00:01Z", "1970-01-01T00:00:02Z", ""}, + }, + }, + }) { + t.Fatalf("unexpected rows: %s", spew.Sdump(res.Series)) + } +} + // StatementExecutor represents a test wrapper for meta.StatementExecutor. 
type StatementExecutor struct { *meta.StatementExecutor diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store.go index 23bac17f2..a2d7ce8f6 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store.go @@ -254,7 +254,10 @@ func (s *Store) Open() error { close(s.ready) } - return nil + // Wait for a leader to be elected so we know the raft log is loaded + // and up to date + <-s.ready + return s.WaitForLeader(0) } // syncNodeInfo continuously tries to update the current nodes hostname @@ -858,6 +861,7 @@ func (s *Store) CreateDatabase(name string) (*DatabaseInfo, error) { ); err != nil { return nil, err } + s.Logger.Printf("database '%s' created", name) if s.retentionAutoCreate { // Read node count. @@ -977,6 +981,7 @@ func (s *Store) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) return nil, err } + s.Logger.Printf("retention policy '%s' for database '%s' created", rpi.Name, database) return s.RetentionPolicy(database, rpi.Name) } @@ -1389,38 +1394,34 @@ func (s *Store) UserCount() (count int, err error) { return } -// PrecreateShardGroups creates shard groups whose endtime is before the cutoff time passed in. This -// avoid the need for these shards to be created when data for the corresponding time range arrives. -// Shard creation involves Raft consensus, and precreation avoids taking the hit at write-time. -func (s *Store) PrecreateShardGroups(cutoff time.Time) error { +// PrecreateShardGroups creates shard groups whose endtime is before the 'to' time passed in, but +// is yet to expire before 'from'. This is to avoid the need for these shards to be created when data +// for the corresponding time range arrives. Shard creation involves Raft consensus, and precreation +// avoids taking the hit at write-time. 
+func (s *Store) PrecreateShardGroups(from, to time.Time) error { s.read(func(data *Data) error { for _, di := range data.Databases { for _, rp := range di.RetentionPolicies { - for _, g := range rp.ShardGroups { - // Check to see if it is not deleted and going to end before our interval - if !g.Deleted() && g.EndTime.Before(cutoff) { - nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) + if len(rp.ShardGroups) == 0 { + // No data was ever written to this group, or all groups have been deleted. + continue + } + g := rp.ShardGroups[len(rp.ShardGroups)-1] // Get the last group in time. + if !g.Deleted() && g.EndTime.Before(to) && g.EndTime.After(from) { + // Group is not deleted, will end before the future time, but is still yet to expire. + // This last check is important, so the system doesn't create shards groups wholly + // in the past. - // Check if successive shard group exists. - if sgi, err := s.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); err != nil { - s.Logger.Printf("failed to check if successive shard group for group exists %d: %s", - g.ID, err.Error()) - continue - } else if sgi != nil && !sgi.Deleted() { - continue - } - - // It doesn't. Create it. - if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil { - s.Logger.Printf("failed to create successive shard group for group %d: %s", - g.ID, err.Error()) - } else { - s.Logger.Printf("new shard group %d successfully created for database %s, retention policy %s", - newGroup.ID, di.Name, rp.Name) - } + // Create successive shard group. 
+ nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) + if newGroup, err := s.CreateShardGroupIfNotExists(di.Name, rp.Name, nextShardGroupTime); err != nil { + s.Logger.Printf("failed to create successive shard group for group %d: %s", + g.ID, err.Error()) + } else { + s.Logger.Printf("new shard group %d successfully created for database %s, retention policy %s", + newGroup.ID, di.Name, rp.Name) } } - } } return nil diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store_test.go index 6dca9b57c..d287303d8 100644 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store_test.go +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/meta/store_test.go @@ -361,7 +361,7 @@ func TestStore_UpdateRetentionPolicy(t *testing.T) { } else if !reflect.DeepEqual(rpi, &meta.RetentionPolicyInfo{ Name: "rp1", Duration: 10 * time.Hour, - ShardGroupDuration: 7 * 24 * time.Hour, + ShardGroupDuration: 1 * time.Hour, ReplicaN: 1, }) { t.Fatalf("unexpected policy: %#v", rpi) @@ -489,30 +489,57 @@ func TestStore_PrecreateShardGroup(t *testing.T) { s := MustOpenStore() defer s.Close() - // Create node, database, policy, & group. + // Create node, database, policy, & groups. 
if _, err := s.CreateNode("host0"); err != nil { t.Fatal(err) } else if _, err := s.CreateDatabase("db0"); err != nil { t.Fatal(err) } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { t.Fatal(err) - } else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil { + } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp1", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { t.Fatal(err) - } else if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC)); err != nil { + } else if _, err = s.CreateRetentionPolicy("db0", &meta.RetentionPolicyInfo{Name: "rp2", ReplicaN: 2, Duration: 1 * time.Hour}); err != nil { + t.Fatal(err) + } else if _, err := s.CreateShardGroup("db0", "rp0", time.Date(2001, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } else if _, err := s.CreateShardGroup("db0", "rp1", time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC)); err != nil { t.Fatal(err) } + if err := s.PrecreateShardGroups(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC), time.Date(2001, time.January, 1, 3, 0, 0, 0, time.UTC)); err != nil { + t.Fatal(err) + } + + // rp0 should undergo precreation. groups, err := s.ShardGroups("db0", "rp0") if err != nil { t.Fatal(err) } if len(groups) != 2 { - t.Fatalf("shard group precreation failed to create new shard group") + t.Fatalf("shard group precreation failed to create new shard group for rp0") } - if groups[1].StartTime != time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC) { + if groups[1].StartTime != time.Date(2001, time.January, 1, 2, 0, 0, 0, time.UTC) { t.Fatalf("precreated shard group has wrong start time, exp %s, got %s", time.Date(2000, time.January, 1, 1, 0, 0, 0, time.UTC), groups[1].StartTime) } + + // rp1 should not undergo precreation since it is completely in the past. 
+ groups, err = s.ShardGroups("db0", "rp1") + if err != nil { + t.Fatal(err) + } + if len(groups) != 1 { + t.Fatalf("shard group precreation created new shard group for rp1") + } + + // rp2 should not undergo precreation since it has no shards. + groups, err = s.ShardGroups("db0", "rp2") + if err != nil { + t.Fatal(err) + } + if len(groups) != 0 { + t.Fatalf("shard group precreation created new shard group for rp2") + } } // Ensure the store can create a new continuous query. @@ -828,14 +855,14 @@ func TestCluster_Restart(t *testing.T) { t.Fatal("no leader found") } - // Add 5 more ndes, 2 should become raft peers, 3 remote raft clients + // Add 5 more nodes, 2 should become raft peers, 3 remote raft clients for i := 0; i < 5; i++ { if err := c.Join(); err != nil { t.Fatalf("failed to join cluster: %v", err) } } - // The tests use a host host assigned listener port. We need to re-use + // The tests use a host assigned listener port. We need to re-use // the original ports when the new cluster is restarted so that the existing // peer store addresses can be reached. addrs := []string{} @@ -858,10 +885,25 @@ func TestCluster_Restart(t *testing.T) { // Re-create the cluster nodes from existing disk paths and addresses stores := []*Store{} + storeChan := make(chan *Store) for i, s := range c.Stores { - store := MustOpenStoreWithPath(addrs[i], s.Path()) + + // Need to start each instance asynchronously because they have existing raft peers + // store. Starting one will block indefinitely because it will not be able to become + // leader until another peer is available to hold an election. 
+	go func(addr, path string) { +		store := MustOpenStoreWithPath(addr, path) +		storeChan <- store +	}(addrs[i], s.Path()) + +	} + +	// Collect up our restart meta-stores +	for range c.Stores { +		store := <-storeChan 		stores = append(stores, store) 	} + 	c.Stores = stores  	// Wait for the cluster to stabilize diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/README.md new file mode 100644 index 000000000..97fac284d --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/README.md @@ -0,0 +1,47 @@ +# System Monitoring +_This functionality should be considered experimental and is subject to change._ + +_System Monitoring_ means all statistical and diagnostic information made available to the user of the InfluxDB system, about the system itself. Its purpose is to assist with troubleshooting and performance analysis of the database itself. + +## Statistics vs. Diagnostics +A distinction is made between _statistics_ and _diagnostics_ for the purposes of monitoring. Generally a statistical quality is something that is being counted, and for which it makes sense to store persistently for historical analysis. Diagnostic information is not necessarily numerical, and may not make sense to store. + +An example of statistical information would be the number of points received over UDP, or the number of queries executed. Examples of diagnostic information would be a list of current Graphite TCP connections, the version of InfluxDB, or the uptime of the process. + +## System Statistics +`SHOW STATS` displays statistics about subsystems within the running `influxd` process. Statistics include points received, points indexed, bytes written to disk, TCP connections handled etc. These statistics are all zero when the InfluxDB process starts. 
+ +All statistics are written, by default, by each node to a "monitor" database within the InfluxDB system, allowing analysis of aggregated statistical data using the standard InfluxQL language. This allows users to track the performance of their system. Importantly, this allows cluster-level statistics to be viewed, since by querying the monitor database, statistics from all nodes may be queried. This can be a very powerful approach for troubleshooting your InfluxDB system and understanding its behaviour. + +## System Diagnostics +`SHOW DIAGNOSTICS` displays various diagnostic information about the `influxd` process. This information is not stored persistently within the InfluxDB system. + +## Standard expvar support +All statistical information is available at HTTP API endpoint `/debug/vars`, in [expvar](https://golang.org/pkg/expvar/) format, allowing external systems to monitor an InfluxDB node. By default, the full path to this endpoint is `http://localhost:8086/debug/vars`. + +## Configuration +The `monitor` module allows the following configuration: + + * Whether to write statistical and diagnostic information to an InfluxDB system. This is enabled by default. + * The name of the database to where this information should be written. Defaults to `_internal`. The information is written to the default retention policy for the given database. + * The name of the retention policy, along with full configuration control of the retention policy, if the default retention policy is not suitable. + * The rate at which this information should be written. The default rate is once every 10 seconds. + +# Design and Implementation + +A new module named `monitor` supports all basic statistics and diagnostic functionality. This includes: + + * Allowing other modules to register statistics and diagnostics information, allowing it to be accessed on demand by the `monitor` module. 
+ * Serving the statistics and diagnostic information to the user, in response to commands such as `SHOW DIAGNOSTICS`. + * Exposing standard Go runtime information such as garbage collection statistics. + * Making all collected expvar data available via HTTP, for collection by 3rd-party tools. + * Writing the statistical information to the "monitor" database, for query purposes. + +## Registering statistics and diagnostics + +To export statistical information with the `monitor` system, code simply calls `influxdb.NewStatistics()` and receives an `expvar.Map` instance in response. This object can then be used to store statistics. To register diagnostic information, `monitor.RegisterDiagnosticsClient` is called, passing an `influxdb.monitor.DiagsClient` object to `monitor`. + +## expvar +Statistical information is gathered by each package using [expvar](https://golang.org/pkg/expvar). Each package registers a map using its package name. 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/build_info.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/build_info.go new file mode 100644 index 000000000..283038bfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/build_info.go @@ -0,0 +1,18 @@ +package monitor + +// system captures build diagnostics +type build struct { + Version string + Commit string + Branch string +} + +func (b *build) Diagnostics() (*Diagnostic, error) { + diagnostics := map[string]interface{}{ + "Version": b.Version, + "Commit": b.Commit, + "Branch": b.Branch, + } + + return DiagnosticFromMap(diagnostics), nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config.go new file mode 100644 index 000000000..a5a78bf28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config.go @@ -0,0 +1,35 @@ +package monitor + +import ( + "time" + + "github.com/influxdb/influxdb/toml" +) + +const ( + // DefaultStoreEnabled is whether the system writes gathered information in + // an InfluxDB system for historical analysis. + DefaultStoreEnabled = true + + // DefaultStoreDatabase is the name of the database where gathered information is written + DefaultStoreDatabase = "_internal" + + // DefaultStoreInterval is the period between storing gathered information. + DefaultStoreInterval = 10 * time.Second +) + +// Config represents the configuration for the monitor service. +type Config struct { + StoreEnabled bool `toml:"store-enabled"` + StoreDatabase string `toml:"store-database"` + StoreInterval toml.Duration `toml:"store-interval"` +} + +// NewConfig returns an instance of Config with defaults. 
+func NewConfig() Config { + return Config{ + StoreEnabled: true, + StoreDatabase: DefaultStoreDatabase, + StoreInterval: toml.Duration(DefaultStoreInterval), + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config_test.go new file mode 100644 index 000000000..ee62e7346 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/config_test.go @@ -0,0 +1,30 @@ +package monitor_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/monitor" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c monitor.Config + if _, err := toml.Decode(` +store-enabled=true +store-database="the_db" +store-interval="10m" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if !c.StoreEnabled { + t.Fatalf("unexpected store-enabled: %v", c.StoreEnabled) + } else if c.StoreDatabase != "the_db" { + t.Fatalf("unexpected store-database: %s", c.StoreDatabase) + } else if time.Duration(c.StoreInterval) != 10*time.Minute { + t.Fatalf("unexpected store-interval: %s", c.StoreInterval) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/go_runtime.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/go_runtime.go new file mode 100644 index 000000000..f45f4fce2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/go_runtime.go @@ -0,0 +1,19 @@ +package monitor + +import ( + "runtime" +) + +// goRuntime captures Go runtime diagnostics +type goRuntime struct{} + +func (g *goRuntime) Diagnostics() (*Diagnostic, error) { + diagnostics := map[string]interface{}{ + "GOARCH": runtime.GOARCH, + "GOOS": runtime.GOOS, + "GOMAXPROCS": runtime.GOMAXPROCS(-1), + "version": runtime.Version(), + } + + return DiagnosticFromMap(diagnostics), nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/network.go 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/network.go new file mode 100644 index 000000000..80c0f6c9c --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/network.go @@ -0,0 +1,21 @@ +package monitor + +import ( +	"os" +) + +// network captures network diagnostics +type network struct{} + +func (n *network) Diagnostics() (*Diagnostic, error) { +	h, err := os.Hostname() +	if err != nil { +		return nil, err +	} + +	diagnostics := map[string]interface{}{ +		"hostname": h, +	} + +	return DiagnosticFromMap(diagnostics), nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service.go new file mode 100644 index 000000000..d76e31d5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service.go @@ -0,0 +1,406 @@ +package monitor + +import ( +	"expvar" +	"fmt" +	"log" +	"os" +	"runtime" +	"sort" +	"strconv" +	"sync" +	"time" + +	"github.com/influxdb/influxdb/cluster" +	"github.com/influxdb/influxdb/meta" +	"github.com/influxdb/influxdb/tsdb" +) + +const leaderWaitTimeout = 30 * time.Second + +const ( +	MonitorRetentionPolicy         = "monitor" +	MonitorRetentionPolicyDuration = 7 * 24 * time.Hour +) + +// DiagsClient is the interface modules implement if they register diags with monitor. +type DiagsClient interface { +	Diagnostics() (*Diagnostic, error) +} + +// The DiagsClientFunc type is an adapter to allow the use of +// ordinary functions as Diagnostics clients. +type DiagsClientFunc func() (*Diagnostic, error) + +// Diagnostics calls f(). +func (f DiagsClientFunc) Diagnostics() (*Diagnostic, error) { +	return f() +} + +// Diagnostic represents a table of diagnostic information. The first value +// is the name of the columns, the second is a slice of interface slices containing +// the values for each column, by row. This information is never written to an InfluxDB +// system and is display-only. 
An example showing, say, connections follows: +// +// source_ip source_port dest_ip dest_port +// 182.1.0.2 2890 127.0.0.1 38901 +// 174.33.1.2 2924 127.0.0.1 38902 +type Diagnostic struct { + Columns []string + Rows [][]interface{} +} + +func NewDiagnostic(columns []string) *Diagnostic { + return &Diagnostic{ + Columns: columns, + Rows: make([][]interface{}, 0), + } +} + +func (d *Diagnostic) AddRow(r []interface{}) { + d.Rows = append(d.Rows, r) +} + +// Monitor represents an instance of the monitor system. +type Monitor struct { + // Build information for diagnostics. + Version string + Commit string + Branch string + + wg sync.WaitGroup + done chan struct{} + mu sync.Mutex + + diagRegistrations map[string]DiagsClient + + storeEnabled bool + storeDatabase string + storeRetentionPolicy string + storeRetentionDuration time.Duration + storeReplicationFactor int + storeAddress string + storeInterval time.Duration + + MetaStore interface { + ClusterID() (uint64, error) + NodeID() uint64 + WaitForLeader(d time.Duration) error + CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) + CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) + SetDefaultRetentionPolicy(database, name string) error + DropRetentionPolicy(database, name string) error + } + + PointsWriter interface { + WritePoints(p *cluster.WritePointsRequest) error + } + + Logger *log.Logger +} + +// New returns a new instance of the monitor system. +func New(c Config) *Monitor { + return &Monitor{ + done: make(chan struct{}), + diagRegistrations: make(map[string]DiagsClient), + storeEnabled: c.StoreEnabled, + storeDatabase: c.StoreDatabase, + storeInterval: time.Duration(c.StoreInterval), + Logger: log.New(os.Stderr, "[monitor] ", log.LstdFlags), + } +} + +// Open opens the monitoring system, using the given clusterID, node ID, and hostname +// for identification purposem. 
+func (m *Monitor) Open() error { + m.Logger.Printf("Starting monitor system") + + // Self-register various stats and diagnostics. + m.RegisterDiagnosticsClient("build", &build{ + Version: m.Version, + Commit: m.Commit, + Branch: m.Branch, + }) + m.RegisterDiagnosticsClient("runtime", &goRuntime{}) + m.RegisterDiagnosticsClient("network", &network{}) + m.RegisterDiagnosticsClient("system", &system{}) + + // If enabled, record stats in a InfluxDB system. + if m.storeEnabled { + + // Start periodic writes to system. + m.wg.Add(1) + go m.storeStatistics() + } + + return nil +} + +// Close closes the monitor system. +func (m *Monitor) Close() { + m.Logger.Println("shutting down monitor system") + close(m.done) + m.wg.Wait() + m.done = nil +} + +// SetLogger sets the internal logger to the logger passed in. +func (m *Monitor) SetLogger(l *log.Logger) { + m.Logger = l +} + +// RegisterDiagnosticsClient registers a diagnostics client with the given name and tags. +func (m *Monitor) RegisterDiagnosticsClient(name string, client DiagsClient) error { + m.mu.Lock() + defer m.mu.Unlock() + m.diagRegistrations[name] = client + m.Logger.Printf(`'%s' registered for diagnostics monitoring`, name) + return nil +} + +// Statistics returns the combined statistics for all expvar data. The given +// tags are added to each of the returned statistics. +func (m *Monitor) Statistics(tags map[string]string) ([]*statistic, error) { + statistics := make([]*statistic, 0) + + expvar.Do(func(kv expvar.KeyValue) { + // Skip built-in expvar stats. + if kv.Key == "memstats" || kv.Key == "cmdline" { + return + } + + statistic := &statistic{ + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } + + // Add any supplied tags. + for k, v := range tags { + statistic.Tags[k] = v + } + + // Every other top-level expvar value is a map. + m := kv.Value.(*expvar.Map) + + m.Do(func(subKV expvar.KeyValue) { + switch subKV.Key { + case "name": + // straight to string name. 
+ u, err := strconv.Unquote(subKV.Value.String()) + if err != nil { + return + } + statistic.Name = u + case "tags": + // string-string tags map. + n := subKV.Value.(*expvar.Map) + n.Do(func(t expvar.KeyValue) { + u, err := strconv.Unquote(t.Value.String()) + if err != nil { + return + } + statistic.Tags[t.Key] = u + }) + case "values": + // string-interface map. + n := subKV.Value.(*expvar.Map) + n.Do(func(kv expvar.KeyValue) { + var f interface{} + var err error + switch v := kv.Value.(type) { + case *expvar.Float: + f, err = strconv.ParseFloat(v.String(), 64) + if err != nil { + return + } + case *expvar.Int: + f, err = strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return + } + default: + return + } + statistic.Values[kv.Key] = f + }) + } + }) + + // If a registered client has no field data, don't include it in the results + if len(statistic.Values) == 0 { + return + } + + statistics = append(statistics, statistic) + }) + + // Add Go memstats. + statistic := &statistic{ + Name: "runtime", + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } + var rt runtime.MemStats + runtime.ReadMemStats(&rt) + statistic.Values = map[string]interface{}{ + "Alloc": int64(rt.Alloc), + "TotalAlloc": int64(rt.TotalAlloc), + "Sys": int64(rt.Sys), + "Lookups": int64(rt.Lookups), + "Mallocs": int64(rt.Mallocs), + "Frees": int64(rt.Frees), + "HeapAlloc": int64(rt.HeapAlloc), + "HeapSys": int64(rt.HeapSys), + "HeapIdle": int64(rt.HeapIdle), + "HeapInUse": int64(rt.HeapInuse), + "HeapReleased": int64(rt.HeapReleased), + "HeapObjects": int64(rt.HeapObjects), + "PauseTotalNs": int64(rt.PauseTotalNs), + "NumGC": int64(rt.NumGC), + "NumGoroutine": int64(runtime.NumGoroutine()), + } + statistics = append(statistics, statistic) + + return statistics, nil +} + +func (m *Monitor) Diagnostics() (map[string]*Diagnostic, error) { + m.mu.Lock() + defer m.mu.Unlock() + + diags := make(map[string]*Diagnostic, len(m.diagRegistrations)) + for k, v := range 
m.diagRegistrations { + d, err := v.Diagnostics() + if err != nil { + continue + } + diags[k] = d + } + return diags, nil +} + +// storeStatistics writes the statistics to an InfluxDB system. +func (m *Monitor) storeStatistics() { + defer m.wg.Done() + m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s", + m.storeDatabase, m.storeRetentionPolicy, m.storeInterval) + + if err := m.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil { + m.Logger.Printf("failed to detect a cluster leader, terminating storage: %s", err.Error()) + return + } + + // Get cluster-level metadata. Nothing different is going to happen if errors occur. + clusterID, _ := m.MetaStore.ClusterID() + nodeID := m.MetaStore.NodeID() + hostname, _ := os.Hostname() + clusterTags := map[string]string{ + "clusterID": fmt.Sprintf("%d", clusterID), + "nodeID": fmt.Sprintf("%d", nodeID), + "hostname": hostname, + } + + if _, err := m.MetaStore.CreateDatabaseIfNotExists(m.storeDatabase); err != nil { + m.Logger.Printf("failed to create database '%s', terminating storage: %s", + m.storeDatabase, err.Error()) + return + } + + rpi := meta.NewRetentionPolicyInfo(MonitorRetentionPolicy) + rpi.Duration = MonitorRetentionPolicyDuration + rpi.ReplicaN = 1 + if _, err := m.MetaStore.CreateRetentionPolicyIfNotExists(m.storeDatabase, rpi); err != nil { + m.Logger.Printf("failed to create retention policy '%s', terminating storage: %s", + rpi.Name, err.Error()) + return + } + + if err := m.MetaStore.SetDefaultRetentionPolicy(m.storeDatabase, rpi.Name); err != nil { + m.Logger.Printf("failed to set default retention policy on '%s', terminating storage: %s", + m.storeDatabase, err.Error()) + return + } + + if err := m.MetaStore.DropRetentionPolicy(m.storeDatabase, "default"); err != nil && err != meta.ErrRetentionPolicyNotFound { + m.Logger.Printf("failed to delete retention policy 'default', terminating storage: %s", err.Error()) + return + } + + tick := 
time.NewTicker(m.storeInterval) + defer tick.Stop() + for { + select { + case <-tick.C: + stats, err := m.Statistics(clusterTags) + if err != nil { + m.Logger.Printf("failed to retrieve registered statistics: %s", err) + continue + } + + points := make(tsdb.Points, 0, len(stats)) + for _, s := range stats { + points = append(points, tsdb.NewPoint(s.Name, s.Tags, s.Values, time.Now())) + } + + err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{ + Database: m.storeDatabase, + RetentionPolicy: m.storeRetentionPolicy, + ConsistencyLevel: cluster.ConsistencyLevelOne, + Points: points, + }) + if err != nil { + m.Logger.Printf("failed to store statistics: %s", err) + } + case <-m.done: + m.Logger.Printf("terminating storage of statistics") + return + } + + } +} + +// statistic represents the information returned by a single monitor client. +type statistic struct { + Name string + Tags map[string]string + Values map[string]interface{} +} + +// newStatistic returns a new statistic object. +func newStatistic(name string, tags map[string]string, values map[string]interface{}) *statistic { + return &statistic{ + Name: name, + Tags: tags, + Values: values, + } +} + +// valueNames returns a sorted list of the value names, if any. +func (s *statistic) valueNames() []string { + a := make([]string, 0, len(s.Values)) + for k, _ := range s.Values { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// DiagnosticFromMap returns a Diagnostic from a map. +func DiagnosticFromMap(m map[string]interface{}) *Diagnostic { + // Display columns in deterministic order. 
+ sortedKeys := make([]string, 0, len(m)) + for k, _ := range m { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + d := NewDiagnostic(sortedKeys) + row := make([]interface{}, len(sortedKeys)) + for i, k := range sortedKeys { + row[i] = m[k] + } + d.AddRow(row) + + return d +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service_test.go new file mode 100644 index 000000000..e84358592 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/service_test.go @@ -0,0 +1,71 @@ +package monitor + +import ( + "strings" + "testing" + "time" + + "github.com/influxdb/influxdb" + "github.com/influxdb/influxdb/influxql" + "github.com/influxdb/influxdb/meta" +) + +// Test that a registered stats client results in the correct SHOW STATS output. +func Test_RegisterStats(t *testing.T) { + monitor := openMonitor(t) + executor := &StatementExecutor{Monitor: monitor} + + // Register stats without tags. + statMap := influxdb.NewStatistics("foo", "foo", nil) + statMap.Add("bar", 1) + statMap.AddFloat("qux", 2.4) + json := executeShowStatsJSON(t, executor) + if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) || !strings.Contains(json, `"name":"foo"`) { + t.Fatalf("SHOW STATS response incorrect, got: %s\n", json) + } + + // Register a client with tags. 
+ statMap = influxdb.NewStatistics("bar", "baz", map[string]string{"proto": "tcp"}) + statMap.Add("bar", 1) + statMap.AddFloat("qux", 2.4) + json = executeShowStatsJSON(t, executor) + if !strings.Contains(json, `"columns":["bar","qux"],"values":[[1,2.4]]`) || + !strings.Contains(json, `"name":"baz"`) || + !strings.Contains(json, `"proto":"tcp"`) { + t.Fatalf("SHOW STATS response incorrect, got: %s\n", json) + + } +} + +type mockMetastore struct{} + +func (m *mockMetastore) ClusterID() (uint64, error) { return 1, nil } +func (m *mockMetastore) NodeID() uint64 { return 2 } +func (m *mockMetastore) WaitForLeader(d time.Duration) error { return nil } +func (m *mockMetastore) SetDefaultRetentionPolicy(database, name string) error { return nil } +func (m *mockMetastore) DropRetentionPolicy(database, name string) error { return nil } +func (m *mockMetastore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) { + return nil, nil +} +func (m *mockMetastore) CreateRetentionPolicyIfNotExists(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) { + return nil, nil +} + +func openMonitor(t *testing.T) *Monitor { + monitor := New(NewConfig()) + monitor.MetaStore = &mockMetastore{} + err := monitor.Open() + if err != nil { + t.Fatalf("failed to open monitor: %s", err.Error()) + } + return monitor +} + +func executeShowStatsJSON(t *testing.T, s *StatementExecutor) string { + r := s.ExecuteStatement(&influxql.ShowStatsStatement{}) + b, err := r.MarshalJSON() + if err != nil { + t.Fatalf("failed to decode SHOW STATS response: %s", err.Error()) + } + return string(b) +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/statement_executor.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/statement_executor.go new file mode 100644 index 000000000..d6fa1c1b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/statement_executor.go @@ -0,0 +1,65 @@ +package monitor + +import ( + 
"fmt" + + "github.com/influxdb/influxdb/influxql" +) + +// StatementExecutor translates InfluxQL queries to Monitor methods. +type StatementExecutor struct { + Monitor interface { + Statistics(map[string]string) ([]*statistic, error) + Diagnostics() (map[string]*Diagnostic, error) + } +} + +// ExecuteStatement executes monitor-related query statements. +func (s *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.Result { + switch stmt := stmt.(type) { + case *influxql.ShowStatsStatement: + return s.executeShowStatistics() + case *influxql.ShowDiagnosticsStatement: + return s.executeShowDiagnostics() + default: + panic(fmt.Sprintf("unsupported statement type: %T", stmt)) + } +} + +func (s *StatementExecutor) executeShowStatistics() *influxql.Result { + stats, err := s.Monitor.Statistics(nil) + if err != nil { + return &influxql.Result{Err: err} + } + rows := make([]*influxql.Row, len(stats)) + + for n, stat := range stats { + row := &influxql.Row{Name: stat.Name, Tags: stat.Tags} + + values := make([]interface{}, 0, len(stat.Values)) + for _, k := range stat.valueNames() { + row.Columns = append(row.Columns, k) + values = append(values, stat.Values[k]) + } + row.Values = [][]interface{}{values} + rows[n] = row + } + return &influxql.Result{Series: rows} +} + +func (s *StatementExecutor) executeShowDiagnostics() *influxql.Result { + diags, err := s.Monitor.Diagnostics() + if err != nil { + return &influxql.Result{Err: err} + } + rows := make([]*influxql.Row, 0, len(diags)) + + for k, v := range diags { + row := &influxql.Row{Name: k} + + row.Columns = v.Columns + row.Values = v.Rows + rows = append(rows, row) + } + return &influxql.Result{Series: rows} +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/system.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/system.go new file mode 100644 index 000000000..1f113f176 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/monitor/system.go @@ -0,0 
+1,26 @@ +package monitor + +import ( + "os" + "time" +) + +var startTime time.Time + +func init() { + startTime = time.Now().UTC() +} + +// system captures system-level diagnostics +type system struct{} + +func (s *system) Diagnostics() (*Diagnostic, error) { + diagnostics := map[string]interface{}{ + "PID": os.Getpid(), + "currentTime": time.Now().UTC(), + "started": startTime, + "uptime": time.Since(startTime).String(), + } + + return DiagnosticFromMap(diagnostics), nil +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh new file mode 100644 index 000000000..f41a083de --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +REPO_DIR=`mktemp -d` +echo "Using $REPO_DIR for all work..." + +cd $REPO_DIR +export GOPATH=`pwd` +mkdir -p $GOPATH/src/github.com/influxdb +cd $GOPATH/src/github.com/influxdb +git clone https://github.com/influxdb/influxdb.git + +cd $GOPATH/src/github.com/influxdb/influxdb +NIGHTLY_BUILD=true ./package.sh 0.9.5-nightly-`git log --pretty=format:'%h' -n 1` +rm -rf $REPO_DIR diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh new file mode 100644 index 000000000..e3761a143 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh @@ -0,0 +1,535 @@ +#!/usr/bin/env bash + +########################################################################### +# Packaging script which creates debian and RPM packages. It optionally +# tags the repo with the given version. +# +# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS +# CLI tools must also be installed. +# +# https://github.com/jordansissel/fpm +# http://aws.amazon.com/cli/ +# +# Packaging process: to package a build, simple execute: +# +# package.sh +# +# where is the desired version. 
If generation of a debian and RPM +# package is successful, the script will offer to tag the repo using the +# supplied version string. +# +# See package.sh -h for options +# +# AWS upload: the script will also offer to upload the packages to S3. If +# this option is selected, the credentials should be present in the file +# ~/aws.conf. The contents should be of the form: +# +# [default] +# aws_access_key_id= +# aws_secret_access_key= +# region = us-east-1 +# +# Trim the leading spaces when creating the file. The script will exit if +# S3 upload is requested, but this file does not exist. + +[ -z $DEBUG ] || set -x + +AWS_FILE=~/aws.conf + +INSTALL_ROOT_DIR=/opt/influxdb +INFLUXDB_LOG_DIR=/var/log/influxdb +INFLUXDB_DATA_DIR=/var/opt/influxdb +CONFIG_ROOT_DIR=/etc/opt/influxdb +LOGROTATE_DIR=/etc/logrotate.d + +SAMPLE_CONFIGURATION=etc/config.sample.toml +INITD_SCRIPT=scripts/init.sh +SYSTEMD_SCRIPT=scripts/influxdb.service +LOGROTATE=scripts/logrotate + +TMP_WORK_DIR=`mktemp -d` +POST_INSTALL_PATH=`mktemp` +ARCH=`uname -i` +LICENSE=MIT +URL=influxdb.com +MAINTAINER=support@influxdb.com +VENDOR=Influxdb +DESCRIPTION="Distributed time-series database" + +# Allow path to FPM to be set by environment variables. Some execution contexts +# like cron don't have PATH set correctly to pick it up. +if [ -z "$FPM" ]; then + FPM=`which fpm` +fi + +GO_VERSION="go1.5" +GOPATH_INSTALL= +BINS=( + influxd + influx + ) + +########################################################################### +# Helper functions. + +# usage prints simple usage information. +usage() { + cat << EOF >&2 +$0 [-h] [-p|-w] [-t ] + -p just build packages + -w build packages for current working directory + imply -p + -t + build package for + can be rpm, tar or deb + can have multiple -t +EOF + cleanup_exit $1 +} + +# cleanup_exit removes all resources created during the process and exits with +# the supplied returned code. 
+cleanup_exit() { + rm -r $TMP_WORK_DIR + rm $POST_INSTALL_PATH + exit $1 +} + +# current_branch echos the current git branch. +current_branch() { + echo `git rev-parse --abbrev-ref HEAD` +} + +# check_gopath sanity checks the value of the GOPATH env variable, and determines +# the path where build artifacts are installed. GOPATH may be a colon-delimited +# list of directories. +check_gopath() { + [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1 + GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1` + [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1 + echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation." +} + +check_gvm() { + if [ -n "$GOPATH" ]; then + existing_gopath=$GOPATH + fi + + source $HOME/.gvm/scripts/gvm + which gvm + if [ $? -ne 0 ]; then + echo "gvm not found -- aborting." + cleanup_exit $1 + fi + gvm use $GO_VERSION + if [ $? -ne 0 ]; then + echo "gvm cannot find Go version $GO_VERSION -- aborting." + cleanup_exit $1 + fi + + # Keep any existing GOPATH set. + if [ -n "$existing_gopath" ]; then + GOPATH=$existing_gopath + fi +} + +# check_clean_tree ensures that no source file is locally modified. +check_clean_tree() { + modified=$(git ls-files --modified | wc -l) + if [ $modified -ne 0 ]; then + echo "The source tree is not clean -- aborting." + cleanup_exit 1 + fi + echo "Git tree is clean." +} + +# update_tree ensures the tree is in-sync with the repo. +update_tree() { + git pull origin $TARGET_BRANCH + if [ $? -ne 0 ]; then + echo "Failed to pull latest code -- aborting." + cleanup_exit 1 + fi + git fetch --tags + if [ $? -ne 0 ]; then + echo "Failed to fetch tags -- aborting." + cleanup_exit 1 + fi + echo "Git tree updated successfully." +} + +# check_tag_exists checks if the existing release already exists in the tags. +check_tag_exists () { + version=$1 + git tag | grep -q "^v$version$" + if [ $? 
-eq 0 ]; then + echo "Proposed version $version already exists as a tag -- aborting." + cleanup_exit 1 + fi +} + +# make_dir_tree creates the directory structure within the packages. +make_dir_tree() { + work_dir=$1 + version=$2 + mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts + if [ $? -ne 0 ]; then + echo "Failed to create installation directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$CONFIG_ROOT_DIR + if [ $? -ne 0 ]; then + echo "Failed to create configuration directory -- aborting." + cleanup_exit 1 + fi + mkdir -p $work_dir/$LOGROTATE_DIR + if [ $? -ne 0 ]; then + echo "Failed to create logrotate directory -- aborting." + cleanup_exit 1 + fi +} + +# do_build builds the code. The version and commit must be passed in. +do_build() { + for b in ${BINS[*]}; do + rm -f $GOPATH_INSTALL/bin/$b + done + + if [ -n "$WORKING_DIR" ]; then + STASH=`git stash create -a` + if [ $? -ne 0 ]; then + echo "WARNING: failed to stash uncommited local changes" + fi + git reset --hard + fi + + go get -u -f -d ./... + if [ $? -ne 0 ]; then + echo "WARNING: failed to 'go get' packages." + fi + + git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back. + + if [ -n "$WORKING_DIR" ]; then + git stash apply $STASH + if [ $? -ne 0 ]; then #and apply previous uncommited local changes + echo "WARNING: failed to restore uncommited local changes" + fi + fi + + version=$1 + commit=`git rev-parse HEAD` + branch=`current_branch` + if [ $? -ne 0 ]; then + echo "Unable to retrieve current commit -- aborting" + cleanup_exit 1 + fi + + go install -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit" ./... + if [ $? -ne 0 ]; then + echo "Build failed, unable to create package -- aborting" + cleanup_exit 1 + fi + echo "Build completed successfully." +} + +# generate_postinstall_script creates the post-install script for the +# package. It must be passed the version. 
+generate_postinstall_script() { + version=$1 + cat <$POST_INSTALL_PATH +rm -f $INSTALL_ROOT_DIR/influxd +rm -f $INSTALL_ROOT_DIR/influx +rm -f $INSTALL_ROOT_DIR/init.sh +ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd +ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx +ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh + +if ! id influxdb >/dev/null 2>&1; then + useradd --system -U -M influxdb +fi + +# Systemd +if which systemctl > /dev/null 2>&1 ; then + cp $INSTALL_ROOT_DIR/versions/$version/scripts/influxdb.service \ + /lib/systemd/system/influxdb.service + systemctl enable influxdb + +# Sysv +else + rm -f /etc/init.d/influxdb + ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/influxdb + chmod +x /etc/init.d/influxdb + if which update-rc.d > /dev/null 2>&1 ; then + update-rc.d -f influxdb remove + update-rc.d influxdb defaults + else + chkconfig --add influxdb + fi +fi + +chown -R -L influxdb:influxdb $INSTALL_ROOT_DIR +chmod -R a+rX $INSTALL_ROOT_DIR + +mkdir -p $INFLUXDB_LOG_DIR +chown -R -L influxdb:influxdb $INFLUXDB_LOG_DIR +mkdir -p $INFLUXDB_DATA_DIR +chown -R -L influxdb:influxdb $INFLUXDB_DATA_DIR +EOF + echo "Post-install script created successfully at $POST_INSTALL_PATH" +} + +########################################################################### +# Process options +while : +do + case $1 in + -h | --help) + usage 0 + ;; + + -p | --packages-only) + PACKAGES_ONLY="PACKAGES_ONLY" + shift + ;; + + -t | --target) + case "$2" in + 'tar') TAR_WANTED="gz" + ;; + 'deb') DEB_WANTED="deb" + ;; + 'rpm') RPM_WANTED="rpm" + ;; + *) + echo "Unknown target distribution $2" + usage 1 + ;; + esac + shift 2 + ;; + + -w | --working-directory) + PACKAGES_ONLY="PACKAGES_ONLY" + WORKING_DIR="WORKING_DIR" + shift + ;; + + -*) + echo "Unknown option $1" + usage 1 + ;; + + ?*) + if [ -z $VERSION ]; then + VERSION=$1 + VERSION_UNDERSCORED=`echo "$VERSION" | tr - _` + shift + else + echo "$1 
: aborting version already set to $VERSION" + usage 1 + fi + ;; + + *) break + esac +done + +if [ -z "$DEB_WANTED$RPM_WANTED$TAR_WANTED" ]; then + TAR_WANTED="gz" + DEB_WANTED="deb" + RPM_WANTED="rpm" +fi + +if [ -z "$VERSION" ]; then + echo -e "Missing version" + usage 1 +fi + +########################################################################### +# Start the packaging process. + +echo -e "\nStarting package process...\n" + +# Ensure the current is correct. +TARGET_BRANCH=`current_branch` +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then +echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xn" ]; then + echo "Packaging aborted." + cleanup_exit 1 + fi +fi + +check_gvm +check_gopath +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + check_clean_tree + update_tree + check_tag_exists $VERSION +fi + +do_build $VERSION +make_dir_tree $TMP_WORK_DIR $VERSION + +########################################################################### +# Copy the assets to the installation directories. + +for b in ${BINS[*]}; do + cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION + if [ $? -ne 0 ]; then + echo "Failed to copy binaries to packaging directory -- aborting." + cleanup_exit 1 + fi +done +echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION" + +cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts +if [ $? -ne 0 ]; then + echo "Failed to copy init.d script to packaging directory -- aborting." + cleanup_exit 1 +fi +echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" + +cp $SYSTEMD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts +if [ $? -ne 0 ]; then + echo "Failed to copy systemd script to packaging directory -- aborting." 
+ cleanup_exit 1 +fi +echo "$SYSTEMD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" + +cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf +if [ $? -ne 0 ]; then + echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." + cleanup_exit 1 +fi + +cp $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxd +if [ $? -ne 0 ]; then + echo "Failed to copy logrotate configuration to packaging directory -- aborting." + cleanup_exit 1 +fi + +generate_postinstall_script $VERSION + +########################################################################### +# Create the actual packages. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xn" ]; then + echo "Packaging aborted." + cleanup_exit 1 + fi +fi + +if [ $ARCH == "i386" ]; then + rpm_package=influxdb-${VERSION}-1.i686.rpm # RPM packages use 1 for default package release. + debian_package=influxdb_${VERSION}_i686.deb + deb_args="-a i686" + rpm_args="setarch i686" +elif [ $ARCH == "arm" ]; then + rpm_package=influxdb-${VERSION}-1.armel.rpm + debian_package=influxdb_${VERSION}_armel.deb +else + rpm_package=influxdb-${VERSION}-1.x86_64.rpm + debian_package=influxdb_${VERSION}_amd64.deb +fi + +COMMON_FPM_ARGS="--log error -C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name influxdb --version $VERSION --config-files $CONFIG_ROOT_DIR --config-files $LOGROTATE_DIR ." + +if [ -n "$DEB_WANTED" ]; then + $FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS + if [ $? -ne 0 ]; then + echo "Failed to create Debian package -- aborting." + cleanup_exit 1 + fi + echo "Debian package created successfully." 
+fi + +if [ -n "$TAR_WANTED" ]; then + $FPM -s dir -t tar --prefix influxdb_${VERSION}_${ARCH} -p influxdb_${VERSION}_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS + if [ $? -ne 0 ]; then + echo "Failed to create Tar package -- aborting." + cleanup_exit 1 + fi + echo "Tar package created successfully." +fi + +if [ -n "$RPM_WANTED" ]; then + $rpm_args $FPM -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS + if [ $? -ne 0 ]; then + echo "Failed to create RPM package -- aborting." + cleanup_exit 1 + fi + echo "RPM package created successfully." +fi + +########################################################################### +# Offer to tag the repo. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Tag source tree with v$VERSION and push to repo? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` + if [ "x$response" == "xy" ]; then + echo "Creating tag v$VERSION and pushing to repo" + git tag v$VERSION + if [ $? -ne 0 ]; then + echo "Failed to create tag v$VERSION -- aborting" + cleanup_exit 1 + fi + echo "Tag v$VERSION created" + git push origin v$VERSION + if [ $? -ne 0 ]; then + echo "Failed to push tag v$VERSION to repo -- aborting" + cleanup_exit 1 + fi + echo "Tag v$VERSION pushed to repo" + else + echo "Not creating tag v$VERSION." + fi +fi + +########################################################################### +# Offer to publish the packages. + +if [ -z "$NIGHTLY_BUILD" -a -z "$PACKAGES_ONLY" ]; then + echo -n "Publish packages to S3? [y/N] " + read response + response=`echo $response | tr 'A-Z' 'a-z'` +fi + +if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then + echo "Publishing packages to S3." + if [ ! -e "$AWS_FILE" ]; then + echo "$AWS_FILE does not exist -- aborting." 
+ cleanup_exit 1 + fi + + for filepath in `ls *.{$DEB_WANTED,$RPM_WANTED,$TAR_WANTED} 2> /dev/null`; do + filename=`basename $filepath` + if [ -n "$NIGHTLY_BUILD" ]; then + filename=`echo $filename | sed s/$VERSION/nightly/` + filename=`echo $filename | sed s/$VERSION_UNDERSCORED/nightly/` + fi + AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1 + if [ $? -ne 0 ]; then + echo "Upload failed ($filename) -- aborting". + cleanup_exit 1 + fi + echo "$filename uploaded" + done +else + echo "Not publishing packages to S3." +fi + +########################################################################### +# All done. + +echo -e "\nPackaging process complete." +cleanup_exit 0 diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service new file mode 100644 index 000000000..e8633acb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service @@ -0,0 +1,19 @@ +# If you modify this, please also make sure to edit init.sh + +[Unit] +Description=InfluxDB is an open-source, distributed, time series database +Documentation=https://influxdb.com/docs/ +After=network.target + +[Service] +User=influxdb +Group=influxdb +LimitNOFILE=65536 +EnvironmentFile=-/etc/default/influxdb +ExecStart=/opt/influxdb/influxd -config /etc/opt/influxdb/influxdb.conf $INFLUXD_OPTS +KillMode=process +Restart=on-failure + +[Install] +WantedBy=multi-user.target +Alias=influxd.service diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh new file mode 100644 index 000000000..4cdc136bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh @@ -0,0 +1,218 @@ +#! 
/usr/bin/env bash + +### BEGIN INIT INFO +# Provides: influxd +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start influxd at boot time +### END INIT INFO + +# If you modify this, please make sure to also edit influxdb.service +# this init script supports three different variations: +# 1. New lsb that define start-stop-daemon +# 2. Old lsb that don't have start-stop-daemon but define, log, pidofproc and killproc +# 3. Centos installations without lsb-core installed +# +# In the third case we have to define our own functions which are very dumb +# and expect the args to be positioned correctly. + +# Command-line options that can be set in /etc/default/influxdb. These will override +# any config file values. Example: "-join http://1.2.3.4:8086" +DEFAULT=/etc/default/influxdb + +# Daemon options +INFLUXD_OPTS= + +# Process name ( For display ) +NAME=influxdb + +# User and group +USER=influxdb +GROUP=influxdb + +# Daemon name, where is the actual executable +# If the daemon is not there, then exit. +DAEMON=/opt/influxdb/influxd +[ -x $DAEMON ] || exit 5 + +# Configuration file +CONFIG=/etc/opt/influxdb/influxdb.conf + +# PID file for the daemon +PIDFILE=/var/run/influxdb/influxd.pid +PIDDIR=`dirname $PIDFILE` +if [ ! -d "$PIDDIR" ]; then + mkdir -p $PIDDIR + chown $USER:$GROUP $PIDDIR +fi + +# Max open files +OPEN_FILE_LIMIT=65536 + +if [ -r /lib/lsb/init-functions ]; then + source /lib/lsb/init-functions +fi + +# Logging +if [ -z "$STDOUT" ]; then + STDOUT=/dev/null +fi + +if [ ! -f "$STDOUT" ]; then + mkdir -p $(dirname $STDOUT) +fi + +if [ -z "$STDERR" ]; then + STDERR=/var/log/influxdb/influxd.log +fi + +if [ ! -f "$STDERR" ]; then + mkdir -p $(dirname $STDERR) +fi + +# Overwrite init script variables with /etc/default/influxdb values +if [ -r $DEFAULT ]; then + source $DEFAULT +fi + +function pidofproc() { + if [ $# -ne 3 ]; then + echo "Expected three arguments, e.g. 
$0 -p pidfile daemon-name"
+    fi
+
+    PID=`pgrep -f $3`
+    local PIDFILE=`cat $2`
+
+    if [ "x$PIDFILE" == "x" ]; then
+        return 1
+    fi
+
+    if [ "x$PID" != "x" -a "$PIDFILE" == "$PID" ]; then
+        return 0
+    fi
+
+    return 1
+}
+
+function killproc() {
+    if [ $# -ne 3 ]; then
+        echo "Expected three arguments, e.g. $0 -p pidfile signal"
+    fi
+
+    PID=`cat $2`
+
+    /bin/kill -s $3 $PID
+    while true; do
+        pidof `basename $DAEMON` >/dev/null
+        if [ $? -ne 0 ]; then
+            return 0
+        fi
+
+        sleep 1
+        n=$(expr $n + 1)
+        if [ $n -eq 30 ]; then
+            /bin/kill -s SIGKILL $PID
+            return 0
+        fi
+    done
+}
+
+function log_failure_msg() {
+    echo "$@" "[ FAILED ]"
+}
+
+function log_success_msg() {
+    echo "$@" "[ OK ]"
+}
+
+case $1 in
+    start)
+        # Check that the config file exists
+        if [ ! -r $CONFIG ]; then
+            log_failure_msg "config file doesn't exist"
+            exit 4
+        fi
+
+        # Check the PID file exists and check the actual status of process
+        if [ -e $PIDFILE ]; then
+            pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?"
+            # If the status is SUCCESS then don't need to start again.
+            if [ "x$STATUS" = "x0" ]; then
+                log_failure_msg "$NAME process is running"
+                exit 0 # Exit
+            fi
+        # if PID file does not exist, check if writable
+        else
+            su -c "touch $PIDFILE" $USER > /dev/null 2>&1
+            if [ $? -ne 0 ]; then
+                log_failure_msg "$PIDFILE not writable, check permissions"
+                exit 5
+            fi
+        fi
+
+        # Bump the file limits, before launching the daemon. These will carry over to
+        # launched processes.
+        ulimit -n $OPEN_FILE_LIMIT
+        if [ $?
-ne 0 ]; then + log_failure_msg "set open file limit to $OPEN_FILE_LIMIT" + exit 1 + fi + + log_success_msg "Starting the process" "$NAME" + if which start-stop-daemon > /dev/null 2>&1; then + start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $PIDFILE --exec $DAEMON -- -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & + else + su -s /bin/sh -c "nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" $USER + fi + log_success_msg "$NAME process was started" + ;; + + stop) + # Stop the daemon. + if [ -e $PIDFILE ]; then + pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?" + if [ "$STATUS" = 0 ]; then + if killproc -p $PIDFILE SIGTERM && /bin/rm -rf $PIDFILE; then + log_success_msg "$NAME process was stopped" + else + log_failure_msg "$NAME failed to stop service" + fi + fi + else + log_failure_msg "$NAME process is not running" + fi + ;; + + restart) + # Restart the daemon. + $0 stop && sleep 2 && $0 start + ;; + + status) + # Check the status of the process. + if [ -e $PIDFILE ]; then + if pidofproc -p $PIDFILE $DAEMON > /dev/null; then + log_success_msg "$NAME Process is running" + exit 0 + else + log_failure_msg "$NAME Process is not running" + exit 1 + fi + else + log_failure_msg "$NAME Process is not running" + exit 3 + fi + ;; + + version) + $DAEMON version + ;; + + *) + # For invalid arguments, print the usage message. 
+ echo "Usage: $0 {start|stop|restart|status|version}" + exit 2 + ;; +esac diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/logrotate b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/logrotate new file mode 100644 index 000000000..de410d48c --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/logrotate @@ -0,0 +1,8 @@ +/var/log/influxdb/influxd.log { + daily + rotate 7 + missingok + dateext + copytruncate + compress +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go new file mode 100644 index 000000000..860dff864 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go @@ -0,0 +1,21 @@ +package admin + +const ( + // DefaultBindAddress is the default bind address for the HTTP server. + DefaultBindAddress = ":8083" +) + +type Config struct { + Enabled bool `toml:"enabled"` + BindAddress string `toml:"bind-address"` + HttpsEnabled bool `toml:"https-enabled"` + HttpsCertificate string `toml:"https-certificate"` +} + +func NewConfig() Config { + return Config{ + BindAddress: DefaultBindAddress, + HttpsEnabled: false, + HttpsCertificate: "/etc/ssl/influxdb.pem", + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go new file mode 100644 index 000000000..1b0422505 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go @@ -0,0 +1,32 @@ +package admin_test + +import ( + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/services/admin" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. 
+ var c admin.Config + if _, err := toml.Decode(` +enabled = true +bind-address = ":8083" +https-enabled = true +https-certificate = "/dev/null" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if c.Enabled != true { + t.Fatalf("unexpected enabled: %v", c.Enabled) + } else if c.BindAddress != ":8083" { + t.Fatalf("unexpected bind address: %s", c.BindAddress) + } else if c.HttpsEnabled != true { + t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled) + } else if c.HttpsCertificate != "/dev/null" { + t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go new file mode 100644 index 000000000..2618bdb6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go @@ -0,0 +1,111 @@ +package admin + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http" + "os" + "strings" + + // Register static assets via statik. + _ "github.com/influxdb/influxdb/statik" + "github.com/rakyll/statik/fs" +) + +// Service manages the listener for an admin endpoint. +type Service struct { + listener net.Listener + addr string + https bool + cert string + err chan error + + logger *log.Logger +} + +// NewService returns a new instance of Service. +func NewService(c Config) *Service { + return &Service{ + addr: c.BindAddress, + https: c.HttpsEnabled, + cert: c.HttpsCertificate, + err: make(chan error), + logger: log.New(os.Stderr, "[admin] ", log.LstdFlags), + } +} + +// Open starts the service +func (s *Service) Open() error { + s.logger.Printf("Starting admin service") + + // Open listener. 
+ if s.https { + cert, err := tls.LoadX509KeyPair(s.cert, s.cert) + if err != nil { + return err + } + + listener, err := tls.Listen("tcp", s.addr, &tls.Config{ + Certificates: []tls.Certificate{cert}, + }) + if err != nil { + return err + } + + s.logger.Println("Listening on HTTPS:", listener.Addr().String()) + s.listener = listener + } else { + listener, err := net.Listen("tcp", s.addr) + if err != nil { + return err + } + + s.logger.Println("Listening on HTTP:", listener.Addr().String()) + s.listener = listener + } + + // Begin listening for requests in a separate goroutine. + go s.serve() + return nil +} + +// Close closes the underlying listener. +func (s *Service) Close() error { + if s.listener != nil { + return s.listener.Close() + } + return nil +} + +// SetLogger sets the internal logger to the logger passed in. +func (s *Service) SetLogger(l *log.Logger) { + s.logger = l +} + +// Err returns a channel for fatal errors that occur on the listener. +func (s *Service) Err() <-chan error { return s.err } + +// Addr returns the listener's address. Returns nil if listener is closed. +func (s *Service) Addr() net.Addr { + if s.listener != nil { + return s.listener.Addr() + } + return nil +} + +// serve serves the handler from the listener. +func (s *Service) serve() { + // Instantiate file system from embedded admin. + statikFS, err := fs.New() + if err != nil { + panic(err) + } + + // Run file system handler on listener. 
+ err = http.Serve(s.listener, http.FileServer(statikFS)) + if err != nil && !strings.Contains(err.Error(), "closed") { + s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go new file mode 100644 index 000000000..497b12ea3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go @@ -0,0 +1,33 @@ +package admin_test + +import ( + "io/ioutil" + "net/http" + "testing" + + "github.com/influxdb/influxdb/services/admin" +) + +// Ensure service can serve the root index page of the admin. +func TestService_Index(t *testing.T) { + // Start service on random port. + s := admin.NewService(admin.Config{BindAddress: "127.0.0.1:0"}) + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + + // Request root index page. + resp, err := http.Get("http://" + s.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + // Validate status code and body. 
+ if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %d", resp.StatusCode) + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + t.Fatalf("unable to read body: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf new file mode 100644 index 000000000..97cc4cc08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf @@ -0,0 +1,209 @@ +absolute value:ABSOLUTE:0:U +apache_bytes value:DERIVE:0:U +apache_connections value:GAUGE:0:65535 +apache_idle_workers value:GAUGE:0:65535 +apache_requests value:DERIVE:0:U +apache_scoreboard value:GAUGE:0:65535 +ath_nodes value:GAUGE:0:65535 +ath_stat value:DERIVE:0:U +backends value:GAUGE:0:65535 +bitrate value:GAUGE:0:4294967295 +bytes value:GAUGE:0:U +cache_eviction value:DERIVE:0:U +cache_operation value:DERIVE:0:U +cache_ratio value:GAUGE:0:100 +cache_result value:DERIVE:0:U +cache_size value:GAUGE:0:U +charge value:GAUGE:0:U +compression_ratio value:GAUGE:0:2 +compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U +connections value:DERIVE:0:U +conntrack value:GAUGE:0:4294967295 +contextswitch value:DERIVE:0:U +counter value:COUNTER:U:U +cpufreq value:GAUGE:0:U +cpu value:DERIVE:0:U +current_connections value:GAUGE:0:U +current_sessions value:GAUGE:0:U +current value:GAUGE:U:U +delay value:GAUGE:-1000000:1000000 +derive value:DERIVE:0:U +df_complex value:GAUGE:0:U +df_inodes value:GAUGE:0:U +df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 +disk_latency read:GAUGE:0:U, write:GAUGE:0:U +disk_merged read:DERIVE:0:U, write:DERIVE:0:U +disk_octets read:DERIVE:0:U, write:DERIVE:0:U +disk_ops_complex value:DERIVE:0:U +disk_ops read:DERIVE:0:U, write:DERIVE:0:U +disk_time read:DERIVE:0:U, write:DERIVE:0:U +dns_answer value:DERIVE:0:U +dns_notify value:DERIVE:0:U +dns_octets 
queries:DERIVE:0:U, responses:DERIVE:0:U +dns_opcode value:DERIVE:0:U +dns_qtype_cached value:GAUGE:0:4294967295 +dns_qtype value:DERIVE:0:U +dns_query value:DERIVE:0:U +dns_question value:DERIVE:0:U +dns_rcode value:DERIVE:0:U +dns_reject value:DERIVE:0:U +dns_request value:DERIVE:0:U +dns_resolver value:DERIVE:0:U +dns_response value:DERIVE:0:U +dns_transfer value:DERIVE:0:U +dns_update value:DERIVE:0:U +dns_zops value:DERIVE:0:U +duration seconds:GAUGE:0:U +email_check value:GAUGE:0:U +email_count value:GAUGE:0:U +email_size value:GAUGE:0:U +entropy value:GAUGE:0:4294967295 +fanspeed value:GAUGE:0:U +file_size value:GAUGE:0:U +files value:GAUGE:0:U +flow value:GAUGE:0:U +fork_rate value:DERIVE:0:U +frequency_offset value:GAUGE:-1000000:1000000 +frequency value:GAUGE:0:U +fscache_stat value:DERIVE:0:U +gauge value:GAUGE:U:U +hash_collisions value:DERIVE:0:U +http_request_methods value:DERIVE:0:U +http_requests value:DERIVE:0:U +http_response_codes value:DERIVE:0:U +humidity value:GAUGE:0:100 +if_collisions value:DERIVE:0:U +if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U +if_errors rx:DERIVE:0:U, tx:DERIVE:0:U +if_multicast value:DERIVE:0:U +if_octets rx:DERIVE:0:U, tx:DERIVE:0:U +if_packets rx:DERIVE:0:U, tx:DERIVE:0:U +if_rx_errors value:DERIVE:0:U +if_rx_octets value:DERIVE:0:U +if_tx_errors value:DERIVE:0:U +if_tx_octets value:DERIVE:0:U +invocations value:DERIVE:0:U +io_octets rx:DERIVE:0:U, tx:DERIVE:0:U +io_packets rx:DERIVE:0:U, tx:DERIVE:0:U +ipt_bytes value:DERIVE:0:U +ipt_packets value:DERIVE:0:U +irq value:DERIVE:0:U +latency value:GAUGE:0:U +links value:GAUGE:0:U +load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 +md_disks value:GAUGE:0:U +memcached_command value:DERIVE:0:U +memcached_connections value:GAUGE:0:U +memcached_items value:GAUGE:0:U +memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U +memcached_ops value:DERIVE:0:U +memory value:GAUGE:0:281474976710656 +multimeter value:GAUGE:U:U +mutex_operations value:DERIVE:0:U 
+mysql_commands value:DERIVE:0:U +mysql_handler value:DERIVE:0:U +mysql_locks value:DERIVE:0:U +mysql_log_position value:DERIVE:0:U +mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U +nfs_procedure value:DERIVE:0:U +nginx_connections value:GAUGE:0:U +nginx_requests value:DERIVE:0:U +node_octets rx:DERIVE:0:U, tx:DERIVE:0:U +node_rssi value:GAUGE:0:255 +node_stat value:DERIVE:0:U +node_tx_rate value:GAUGE:0:127 +objects value:GAUGE:0:U +operations value:DERIVE:0:U +percent value:GAUGE:0:100.1 +percent_bytes value:GAUGE:0:100.1 +percent_inodes value:GAUGE:0:100.1 +pf_counters value:DERIVE:0:U +pf_limits value:DERIVE:0:U +pf_source value:DERIVE:0:U +pf_states value:GAUGE:0:U +pf_state value:DERIVE:0:U +pg_blks value:DERIVE:0:U +pg_db_size value:GAUGE:0:U +pg_n_tup_c value:DERIVE:0:U +pg_n_tup_g value:GAUGE:0:U +pg_numbackends value:GAUGE:0:U +pg_scan value:DERIVE:0:U +pg_xact value:DERIVE:0:U +ping_droprate value:GAUGE:0:100 +ping_stddev value:GAUGE:0:65535 +ping value:GAUGE:0:65535 +players value:GAUGE:0:1000000 +power value:GAUGE:0:U +protocol_counter value:DERIVE:0:U +ps_code value:GAUGE:0:9223372036854775807 +ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 +ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U +ps_data value:GAUGE:0:9223372036854775807 +ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U +ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U +ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U +ps_rss value:GAUGE:0:9223372036854775807 +ps_stacksize value:GAUGE:0:9223372036854775807 +ps_state value:GAUGE:0:65535 +ps_vm value:GAUGE:0:9223372036854775807 +queue_length value:GAUGE:0:U +records value:GAUGE:0:U +requests value:GAUGE:0:U +response_time value:GAUGE:0:U +response_code value:GAUGE:0:U +route_etx value:GAUGE:0:U +route_metric value:GAUGE:0:U +routes value:GAUGE:0:U +serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U +signal_noise value:GAUGE:U:0 +signal_power value:GAUGE:U:0 +signal_quality value:GAUGE:0:U +snr value:GAUGE:0:U +spam_check value:GAUGE:0:U +spam_score 
value:GAUGE:U:U +spl value:GAUGE:U:U +swap_io value:DERIVE:0:U +swap value:GAUGE:0:1099511627776 +tcp_connections value:GAUGE:0:4294967295 +temperature value:GAUGE:U:U +threads value:GAUGE:0:U +time_dispersion value:GAUGE:-1000000:1000000 +timeleft value:GAUGE:0:U +time_offset value:GAUGE:-1000000:1000000 +total_bytes value:DERIVE:0:U +total_connections value:DERIVE:0:U +total_objects value:DERIVE:0:U +total_operations value:DERIVE:0:U +total_requests value:DERIVE:0:U +total_sessions value:DERIVE:0:U +total_threads value:DERIVE:0:U +total_time_in_ms value:DERIVE:0:U +total_values value:DERIVE:0:U +uptime value:GAUGE:0:4294967295 +users value:GAUGE:0:65535 +vcl value:GAUGE:0:65535 +vcpu value:GAUGE:0:U +virt_cpu_total value:DERIVE:0:U +virt_vcpu value:DERIVE:0:U +vmpage_action value:DERIVE:0:U +vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U +vmpage_io in:DERIVE:0:U, out:DERIVE:0:U +vmpage_number value:GAUGE:0:4294967295 +volatile_changes value:GAUGE:0:U +voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U +voltage value:GAUGE:U:U +vs_memory value:GAUGE:0:9223372036854775807 +vs_processes value:GAUGE:0:65535 +vs_threads value:GAUGE:0:65535 + +# +# Legacy types +# (required for the v5 upgrade target) +# +arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U +arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U +arc_l2_size value:GAUGE:0:U +arc_ratio value:GAUGE:0:U +arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U +mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U +mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go new file mode 100644 index 000000000..427598ca6 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go @@ -0,0 +1,48 @@ +package collectd + +import ( + "time" + + "github.com/influxdb/influxdb/toml" +) + +const ( + DefaultBindAddress = ":25826" + + DefaultDatabase = "collectd" + + DefaultRetentionPolicy = "" + + DefaultBatchSize = 1000 + + DefaultBatchPending = 5 + + DefaultBatchDuration = toml.Duration(10 * time.Second) + + DefaultTypesDB = "/usr/share/collectd/types.db" +) + +// Config represents a configuration for the collectd service. +type Config struct { + Enabled bool `toml:"enabled"` + BindAddress string `toml:"bind-address"` + Database string `toml:"database"` + RetentionPolicy string `toml:"retention-policy"` + BatchSize int `toml:"batch-size"` + BatchPending int `toml:"batch-pending"` + BatchDuration toml.Duration `toml:"batch-timeout"` + TypesDB string `toml:"typesdb"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + BindAddress: DefaultBindAddress, + Database: DefaultDatabase, + RetentionPolicy: DefaultRetentionPolicy, + BatchSize: DefaultBatchSize, + BatchPending: DefaultBatchPending, + BatchDuration: DefaultBatchDuration, + TypesDB: DefaultTypesDB, + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go new file mode 100644 index 000000000..c419dcfa9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go @@ -0,0 +1,32 @@ +package collectd_test + +import ( + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/services/collectd" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c collectd.Config + if _, err := toml.Decode(` +enabled = true +bind-address = ":9000" +database = "xxx" +typesdb = "yyy" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+ if c.Enabled != true { + t.Fatalf("unexpected enabled: %v", c.Enabled) + } else if c.BindAddress != ":9000" { + t.Fatalf("unexpected bind address: %s", c.BindAddress) + } else if c.Database != "xxx" { + t.Fatalf("unexpected database: %s", c.Database) + } else if c.TypesDB != "yyy" { + t.Fatalf("unexpected types db: %s", c.TypesDB) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go new file mode 100644 index 000000000..35a31049a --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go @@ -0,0 +1,307 @@ +package collectd + +import ( + "expvar" + "fmt" + "log" + "net" + "os" + "strings" + "sync" + "time" + + "github.com/influxdb/influxdb" + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/tsdb" + "github.com/kimor79/gollectd" +) + +const leaderWaitTimeout = 30 * time.Second + +// statistics gathered by the collectd service. +const ( + statPointsReceived = "points_rx" + statBytesReceived = "bytes_rx" + statPointsParseFail = "points_parse_fail" + statReadFail = "read_fail" + statBatchesTrasmitted = "batches_tx" + statPointsTransmitted = "points_tx" + statBatchesTransmitFail = "batches_tx_fail" +) + +// pointsWriter is an internal interface to make testing easier. +type pointsWriter interface { + WritePoints(p *cluster.WritePointsRequest) error +} + +// metaStore is an internal interface to make testing easier. +type metaStore interface { + WaitForLeader(d time.Duration) error + CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) +} + +// Service represents a UDP server which receives metrics in collectd's binary +// protocol and stores them in InfluxDB. 
+type Service struct { + Config *Config + MetaStore metaStore + PointsWriter pointsWriter + Logger *log.Logger + + wg sync.WaitGroup + err chan error + stop chan struct{} + ln *net.UDPConn + batcher *tsdb.PointBatcher + typesdb gollectd.Types + addr net.Addr + + // expvar-based stats. + statMap *expvar.Map +} + +// NewService returns a new instance of the collectd service. +func NewService(c Config) *Service { + s := &Service{ + Config: &c, + Logger: log.New(os.Stderr, "[collectd] ", log.LstdFlags), + err: make(chan error), + } + + return s +} + +// Open starts the service. +func (s *Service) Open() error { + s.Logger.Printf("Starting collectd service") + + // Configure expvar monitoring. It's OK to do this even if the service fails to open and + // should be done before any data could arrive for the service. + key := strings.Join([]string{"collectd", s.Config.BindAddress}, ":") + tags := map[string]string{"bind": s.Config.BindAddress} + s.statMap = influxdb.NewStatistics(key, "collectd", tags) + + if s.Config.BindAddress == "" { + return fmt.Errorf("bind address is blank") + } else if s.Config.Database == "" { + return fmt.Errorf("database name is blank") + } else if s.PointsWriter == nil { + return fmt.Errorf("PointsWriter is nil") + } + + if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil { + s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error()) + return err + } + + if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.Config.Database); err != nil { + s.Logger.Printf("Failed to ensure target database %s exists: %s", s.Config.Database, err.Error()) + return err + } + + if s.typesdb == nil { + // Open collectd types. + typesdb, err := gollectd.TypesDBFile(s.Config.TypesDB) + if err != nil { + return fmt.Errorf("Open(): %s", err) + } + s.typesdb = typesdb + } + + // Resolve our address. 
+ addr, err := net.ResolveUDPAddr("udp", s.Config.BindAddress) + if err != nil { + return fmt.Errorf("unable to resolve UDP address: %s", err) + } + s.addr = addr + + // Start listening + ln, err := net.ListenUDP("udp", addr) + if err != nil { + return fmt.Errorf("unable to listen on UDP: %s", err) + } + s.ln = ln + + s.Logger.Println("Listening on UDP: ", ln.LocalAddr().String()) + + // Start the points batcher. + s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration)) + s.batcher.Start() + + // Create channel and wait group for signalling goroutines to stop. + s.stop = make(chan struct{}) + s.wg.Add(2) + + // Start goroutines that process collectd packets. + go s.serve() + go s.writePoints() + + return nil +} + +// Close stops the service. +func (s *Service) Close() error { + // Close the connection, and wait for the goroutine to exit. + if s.stop != nil { + close(s.stop) + } + if s.ln != nil { + s.ln.Close() + } + if s.batcher != nil { + s.batcher.Stop() + } + s.wg.Wait() + + // Release all remaining resources. + s.stop = nil + s.ln = nil + s.batcher = nil + s.Logger.Println("collectd UDP closed") + return nil +} + +// SetLogger sets the internal logger to the logger passed in. +func (s *Service) SetLogger(l *log.Logger) { + s.Logger = l +} + +// SetTypes sets collectd types db. +func (s *Service) SetTypes(types string) (err error) { + s.typesdb, err = gollectd.TypesDB([]byte(types)) + return +} + +// Err returns a channel for fatal errors that occur on go routines. +func (s *Service) Err() chan error { return s.err } + +// Addr returns the listener's address. Returns nil if listener is closed. 
+func (s *Service) Addr() net.Addr { + return s.ln.LocalAddr() +} + +func (s *Service) serve() { + defer s.wg.Done() + + // From https://collectd.org/wiki/index.php/Binary_protocol + // 1024 bytes (payload only, not including UDP / IP headers) + // In versions 4.0 through 4.7, the receive buffer has a fixed size + // of 1024 bytes. When longer packets are received, the trailing data + // is simply ignored. Since version 4.8, the buffer size can be + // configured. Version 5.0 will increase the default buffer size to + // 1452 bytes (the maximum payload size when using UDP/IPv6 over + // Ethernet). + buffer := make([]byte, 1452) + + for { + select { + case <-s.stop: + // We closed the connection, time to go. + return + default: + // Keep processing. + } + + n, _, err := s.ln.ReadFromUDP(buffer) + if err != nil { + s.statMap.Add(statReadFail, 1) + s.Logger.Printf("collectd ReadFromUDP error: %s", err) + continue + } + if n > 0 { + s.statMap.Add(statBytesReceived, int64(n)) + s.handleMessage(buffer[:n]) + } + } +} + +func (s *Service) handleMessage(buffer []byte) { + packets, err := gollectd.Packets(buffer, s.typesdb) + if err != nil { + s.statMap.Add(statPointsParseFail, 1) + s.Logger.Printf("Collectd parse error: %s", err) + return + } + for _, packet := range *packets { + points := Unmarshal(&packet) + for _, p := range points { + s.batcher.In() <- p + } + s.statMap.Add(statPointsReceived, int64(len(points))) + } +} + +func (s *Service) writePoints() { + defer s.wg.Done() + + for { + select { + case <-s.stop: + return + case batch := <-s.batcher.Out(): + if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{ + Database: s.Config.Database, + RetentionPolicy: s.Config.RetentionPolicy, + ConsistencyLevel: cluster.ConsistencyLevelAny, + Points: batch, + }); err == nil { + s.statMap.Add(statBatchesTrasmitted, 1) + s.statMap.Add(statPointsTransmitted, int64(len(batch))) + } else { + s.Logger.Printf("failed to write point batch to database %q: %s", 
s.Config.Database, err)
+				s.statMap.Add(statBatchesTransmitFail, 1)
+			}
+		}
+	}
+}
+
+// Unmarshal translates a collectd packet into InfluxDB data points.
+func Unmarshal(packet *gollectd.Packet) []tsdb.Point {
+	// Prefer high resolution timestamp.
+	var timestamp time.Time
+	if packet.TimeHR > 0 {
+		// TimeHR is "near" nanosecond measurement, but not exactly nanosecond time
+		// Since we store time in microseconds, we round here (mostly so tests will work easier)
+		sec := packet.TimeHR >> 30
+		// Shifting, masking, and dividing by 1 billion to get nanoseconds.
+		nsec := ((packet.TimeHR & 0x3FFFFFFF) << 30) / 1000 / 1000 / 1000
+		timestamp = time.Unix(int64(sec), int64(nsec)).UTC().Round(time.Microsecond)
+	} else {
+		// If we don't have high resolution time, fall back to basic unix time
+		timestamp = time.Unix(int64(packet.Time), 0).UTC()
+	}
+
+	var points []tsdb.Point
+	for i := range packet.Values {
+		name := fmt.Sprintf("%s_%s", packet.Plugin, packet.Values[i].Name)
+		tags := make(map[string]string)
+		fields := make(map[string]interface{})
+
+		fields["value"] = packet.Values[i].Value
+
+		if packet.Hostname != "" {
+			tags["host"] = packet.Hostname
+		}
+		if packet.PluginInstance != "" {
+			tags["instance"] = packet.PluginInstance
+		}
+		if packet.Type != "" {
+			tags["type"] = packet.Type
+		}
+		if packet.TypeInstance != "" {
+			tags["type_instance"] = packet.TypeInstance
+		}
+		p := tsdb.NewPoint(name, tags, fields, timestamp)
+
+		points = append(points, p)
+	}
+	return points
+}
+
+// assert will panic with a given formatted message if the given condition is false.
+func assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assert failed: "+msg, v...)) + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go new file mode 100644 index 000000000..05e969041 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go @@ -0,0 +1,501 @@ +package collectd + +import ( + "encoding/hex" + "errors" + "io/ioutil" + "log" + "net" + "testing" + "time" + + "github.com/influxdb/influxdb/cluster" + "github.com/influxdb/influxdb/meta" + "github.com/influxdb/influxdb/toml" + "github.com/influxdb/influxdb/tsdb" +) + +// Test that the service checks / creates the target database on startup. +func TestService_CreatesDatabase(t *testing.T) { + t.Parallel() + + s := newTestService(1, time.Second) + + createDatabaseCalled := false + + ms := &testMetaStore{} + ms.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { + if name != s.Config.Database { + t.Errorf("\n\texp = %s\n\tgot = %s\n", s.Config.Database, name) + } + createDatabaseCalled = true + return nil, nil + } + s.Service.MetaStore = ms + + s.Open() + s.Close() + + if !createDatabaseCalled { + t.Errorf("CreateDatabaseIfNotExists should have been called when the service opened.") + } +} + +// Test that the collectd service correctly batches points by BatchSize. +func TestService_BatchSize(t *testing.T) { + t.Parallel() + + totalPoints := len(expPoints) + + // Batch sizes that totalTestPoints divide evenly by. 
+ batchSizes := []int{1, 2, 13} + + for _, batchSize := range batchSizes { + func() { + s := newTestService(batchSize, time.Second) + + pointCh := make(chan tsdb.Point) + s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil } + s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error { + if len(req.Points) != batchSize { + t.Errorf("\n\texp = %d\n\tgot = %d\n", batchSize, len(req.Points)) + } + + for _, p := range req.Points { + pointCh <- p + } + return nil + } + + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer func() { t.Log("closing service"); s.Close() }() + + // Get the address & port the service is listening on for collectd data. + addr := s.Addr() + conn, err := net.Dial("udp", addr.String()) + if err != nil { + t.Fatal(err) + } + + // Send the test data to the service. + if n, err := conn.Write(testData); err != nil { + t.Fatal(err) + } else if n != len(testData) { + t.Fatalf("only sent %d of %d bytes", n, len(testData)) + } + + points := []tsdb.Point{} + Loop: + for { + select { + case p := <-pointCh: + points = append(points, p) + if len(points) == totalPoints { + break Loop + } + case <-time.After(time.Second): + t.Logf("exp %d points, got %d", totalPoints, len(points)) + t.Fatal("timed out waiting for points from collectd service") + } + } + + if len(points) != totalPoints { + t.Fatalf("exp %d points, got %d", totalPoints, len(points)) + } + + for i, exp := range expPoints { + got := points[i].String() + if got != exp { + t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) + } + } + }() + } +} + +// Test that the collectd service correctly batches points using BatchDuration. 
+func TestService_BatchDuration(t *testing.T) { + t.Parallel() + + totalPoints := len(expPoints) + + s := newTestService(5000, 250*time.Millisecond) + + pointCh := make(chan tsdb.Point, 1000) + s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil } + s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error { + for _, p := range req.Points { + pointCh <- p + } + return nil + } + + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer func() { t.Log("closing service"); s.Close() }() + + // Get the address & port the service is listening on for collectd data. + addr := s.Addr() + conn, err := net.Dial("udp", addr.String()) + if err != nil { + t.Fatal(err) + } + + // Send the test data to the service. + if n, err := conn.Write(testData); err != nil { + t.Fatal(err) + } else if n != len(testData) { + t.Fatalf("only sent %d of %d bytes", n, len(testData)) + } + + points := []tsdb.Point{} +Loop: + for { + select { + case p := <-pointCh: + points = append(points, p) + if len(points) == totalPoints { + break Loop + } + case <-time.After(time.Second): + t.Logf("exp %d points, got %d", totalPoints, len(points)) + t.Fatal("timed out waiting for points from collectd service") + } + } + + if len(points) != totalPoints { + t.Fatalf("exp %d points, got %d", totalPoints, len(points)) + } + + for i, exp := range expPoints { + got := points[i].String() + if got != exp { + t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) + } + } +} + +type testService struct { + *Service + MetaStore testMetaStore + PointsWriter testPointsWriter +} + +func newTestService(batchSize int, batchDuration time.Duration) *testService { + s := &testService{ + Service: NewService(Config{ + BindAddress: "127.0.0.1:0", + Database: "collectd_test", + BatchSize: batchSize, + BatchDuration: toml.Duration(batchDuration), + }), + } + s.Service.PointsWriter = &s.PointsWriter + s.Service.MetaStore = &s.MetaStore + + // Set the collectd types 
using test string. + if err := s.SetTypes(typesDBText); err != nil { + panic(err) + } + + if !testing.Verbose() { + s.Logger = log.New(ioutil.Discard, "", log.LstdFlags) + } + + return s +} + +type testPointsWriter struct { + WritePointsFn func(*cluster.WritePointsRequest) error +} + +func (w *testPointsWriter) WritePoints(p *cluster.WritePointsRequest) error { + return w.WritePointsFn(p) +} + +type testMetaStore struct { + CreateDatabaseIfNotExistsFn func(name string) (*meta.DatabaseInfo, error) + //DatabaseFn func(name string) (*meta.DatabaseInfo, error) +} + +func (ms *testMetaStore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) { + return ms.CreateDatabaseIfNotExistsFn(name) +} + +func (ms *testMetaStore) WaitForLeader(d time.Duration) error { + return nil +} + +func wait(c chan struct{}, d time.Duration) (err error) { + select { + case <-c: + case <-time.After(d): + err = errors.New("timed out") + } + return +} + +func waitInt(c chan int, d time.Duration) (i int, err error) { + select { + case i = <-c: + case <-time.After(d): + err = errors.New("timed out") + } + return +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +// Raw data sent by collectd, captured using Wireshark. 
+var testData = func() []byte { + b, err := hex.DecodeString("000000167066312d36322d3231302d39342d313733000001000c00000000544928ff0007000c00000000000000050002000c656e74726f7079000004000c656e74726f7079000006000f0001010000000000007240000200086370750000030006310000040008637075000005000969646c65000006000f0001000000000000a674620005000977616974000006000f0001000000000000000000000200076466000003000500000400076466000005000d6c6976652d636f7700000600180002010100000000a090b641000000a0cb6a2742000200086370750000030006310000040008637075000005000e696e74657272757074000006000f00010000000000000000fe0005000c736f6674697271000006000f000100000000000000000000020007646600000300050000040007646600000500096c6976650000060018000201010000000000000000000000e0ec972742000200086370750000030006310000040008637075000005000a737465616c000006000f00010000000000000000000003000632000005000975736572000006000f0001000000000000005f36000500096e696365000006000f0001000000000000000ad80002000e696e746572666163650000030005000004000e69665f6f6374657473000005000b64756d6d79300000060018000200000000000000000000000000000000041a000200076466000004000764660000050008746d70000006001800020101000000000000f240000000a0ea972742000200086370750000030006320000040008637075000005000b73797374656d000006000f00010000000000000045d30002000e696e746572666163650000030005000004000f69665f7061636b657473000005000b64756d6d79300000060018000200000000000000000000000000000000000f000200086370750000030006320000040008637075000005000969646c65000006000f0001000000000000a66480000200076466000003000500000400076466000005000d72756e2d6c6f636b000006001800020101000000000000000000000000000054410002000e696e74657266616365000004000e69665f6572726f7273000005000b64756d6d793000000600180002000000000000000000000000000000000000000200086370750000030006320000040008637075000005000977616974000006000f00010000000000000000000005000e696e74657272757074000006000f0001000000000000000132") + check(err) + return b +}() + +var expPoints = []string{ + 
"entropy_value,host=pf1-62-210-94-173,type=entropy value=288 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15 1414080767000000000", + 
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704 1414080767000000000", + "df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0 1414080767000000000", + "df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880 1414080767000000000", + "interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000", + "interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0 1414080767000000000", + "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306 1414080767000000000", +} + +// Taken from /usr/share/collectd/types.db on a Ubuntu system +var typesDBText = ` +absolute value:ABSOLUTE:0:U +apache_bytes value:DERIVE:0:U +apache_connections value:GAUGE:0:65535 +apache_idle_workers value:GAUGE:0:65535 +apache_requests value:DERIVE:0:U +apache_scoreboard value:GAUGE:0:65535 +ath_nodes value:GAUGE:0:65535 +ath_stat value:DERIVE:0:U +backends value:GAUGE:0:65535 +bitrate value:GAUGE:0:4294967295 +bytes value:GAUGE:0:U +cache_eviction value:DERIVE:0:U +cache_operation value:DERIVE:0:U +cache_ratio value:GAUGE:0:100 +cache_result value:DERIVE:0:U +cache_size value:GAUGE:0:4294967295 +charge value:GAUGE:0:U +compression_ratio value:GAUGE:0:2 +compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U +connections value:DERIVE:0:U +conntrack value:GAUGE:0:4294967295 +contextswitch value:DERIVE:0:U +counter value:COUNTER:U:U +cpufreq value:GAUGE:0:U +cpu value:DERIVE:0:U +current_connections value:GAUGE:0:U +current_sessions value:GAUGE:0:U +current value:GAUGE:U:U +delay value:GAUGE:-1000000:1000000 +derive value:DERIVE:0:U +df_complex value:GAUGE:0:U +df_inodes value:GAUGE:0:U +df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 +disk_latency read:GAUGE:0:U, write:GAUGE:0:U +disk_merged read:DERIVE:0:U, 
write:DERIVE:0:U +disk_octets read:DERIVE:0:U, write:DERIVE:0:U +disk_ops_complex value:DERIVE:0:U +disk_ops read:DERIVE:0:U, write:DERIVE:0:U +disk_time read:DERIVE:0:U, write:DERIVE:0:U +dns_answer value:DERIVE:0:U +dns_notify value:DERIVE:0:U +dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U +dns_opcode value:DERIVE:0:U +dns_qtype_cached value:GAUGE:0:4294967295 +dns_qtype value:DERIVE:0:U +dns_query value:DERIVE:0:U +dns_question value:DERIVE:0:U +dns_rcode value:DERIVE:0:U +dns_reject value:DERIVE:0:U +dns_request value:DERIVE:0:U +dns_resolver value:DERIVE:0:U +dns_response value:DERIVE:0:U +dns_transfer value:DERIVE:0:U +dns_update value:DERIVE:0:U +dns_zops value:DERIVE:0:U +duration seconds:GAUGE:0:U +email_check value:GAUGE:0:U +email_count value:GAUGE:0:U +email_size value:GAUGE:0:U +entropy value:GAUGE:0:4294967295 +fanspeed value:GAUGE:0:U +file_size value:GAUGE:0:U +files value:GAUGE:0:U +fork_rate value:DERIVE:0:U +frequency_offset value:GAUGE:-1000000:1000000 +frequency value:GAUGE:0:U +fscache_stat value:DERIVE:0:U +gauge value:GAUGE:U:U +hash_collisions value:DERIVE:0:U +http_request_methods value:DERIVE:0:U +http_requests value:DERIVE:0:U +http_response_codes value:DERIVE:0:U +humidity value:GAUGE:0:100 +if_collisions value:DERIVE:0:U +if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U +if_errors rx:DERIVE:0:U, tx:DERIVE:0:U +if_multicast value:DERIVE:0:U +if_octets rx:DERIVE:0:U, tx:DERIVE:0:U +if_packets rx:DERIVE:0:U, tx:DERIVE:0:U +if_rx_errors value:DERIVE:0:U +if_rx_octets value:DERIVE:0:U +if_tx_errors value:DERIVE:0:U +if_tx_octets value:DERIVE:0:U +invocations value:DERIVE:0:U +io_octets rx:DERIVE:0:U, tx:DERIVE:0:U +io_packets rx:DERIVE:0:U, tx:DERIVE:0:U +ipt_bytes value:DERIVE:0:U +ipt_packets value:DERIVE:0:U +irq value:DERIVE:0:U +latency value:GAUGE:0:U +links value:GAUGE:0:U +load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 +md_disks value:GAUGE:0:U +memcached_command value:DERIVE:0:U +memcached_connections 
value:GAUGE:0:U +memcached_items value:GAUGE:0:U +memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U +memcached_ops value:DERIVE:0:U +memory value:GAUGE:0:281474976710656 +multimeter value:GAUGE:U:U +mutex_operations value:DERIVE:0:U +mysql_commands value:DERIVE:0:U +mysql_handler value:DERIVE:0:U +mysql_locks value:DERIVE:0:U +mysql_log_position value:DERIVE:0:U +mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U +nfs_procedure value:DERIVE:0:U +nginx_connections value:GAUGE:0:U +nginx_requests value:DERIVE:0:U +node_octets rx:DERIVE:0:U, tx:DERIVE:0:U +node_rssi value:GAUGE:0:255 +node_stat value:DERIVE:0:U +node_tx_rate value:GAUGE:0:127 +objects value:GAUGE:0:U +operations value:DERIVE:0:U +percent value:GAUGE:0:100.1 +percent_bytes value:GAUGE:0:100.1 +percent_inodes value:GAUGE:0:100.1 +pf_counters value:DERIVE:0:U +pf_limits value:DERIVE:0:U +pf_source value:DERIVE:0:U +pf_states value:GAUGE:0:U +pf_state value:DERIVE:0:U +pg_blks value:DERIVE:0:U +pg_db_size value:GAUGE:0:U +pg_n_tup_c value:DERIVE:0:U +pg_n_tup_g value:GAUGE:0:U +pg_numbackends value:GAUGE:0:U +pg_scan value:DERIVE:0:U +pg_xact value:DERIVE:0:U +ping_droprate value:GAUGE:0:100 +ping_stddev value:GAUGE:0:65535 +ping value:GAUGE:0:65535 +players value:GAUGE:0:1000000 +power value:GAUGE:0:U +protocol_counter value:DERIVE:0:U +ps_code value:GAUGE:0:9223372036854775807 +ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 +ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U +ps_data value:GAUGE:0:9223372036854775807 +ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U +ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U +ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U +ps_rss value:GAUGE:0:9223372036854775807 +ps_stacksize value:GAUGE:0:9223372036854775807 +ps_state value:GAUGE:0:65535 +ps_vm value:GAUGE:0:9223372036854775807 +queue_length value:GAUGE:0:U +records value:GAUGE:0:U +requests value:GAUGE:0:U +response_time value:GAUGE:0:U +response_code value:GAUGE:0:U +route_etx value:GAUGE:0:U +route_metric 
value:GAUGE:0:U +routes value:GAUGE:0:U +serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U +signal_noise value:GAUGE:U:0 +signal_power value:GAUGE:U:0 +signal_quality value:GAUGE:0:U +snr value:GAUGE:0:U +spam_check value:GAUGE:0:U +spam_score value:GAUGE:U:U +spl value:GAUGE:U:U +swap_io value:DERIVE:0:U +swap value:GAUGE:0:1099511627776 +tcp_connections value:GAUGE:0:4294967295 +temperature value:GAUGE:U:U +threads value:GAUGE:0:U +time_dispersion value:GAUGE:-1000000:1000000 +timeleft value:GAUGE:0:U +time_offset value:GAUGE:-1000000:1000000 +total_bytes value:DERIVE:0:U +total_connections value:DERIVE:0:U +total_objects value:DERIVE:0:U +total_operations value:DERIVE:0:U +total_requests value:DERIVE:0:U +total_sessions value:DERIVE:0:U +total_threads value:DERIVE:0:U +total_time_in_ms value:DERIVE:0:U +total_values value:DERIVE:0:U +uptime value:GAUGE:0:4294967295 +users value:GAUGE:0:65535 +vcl value:GAUGE:0:65535 +vcpu value:GAUGE:0:U +virt_cpu_total value:DERIVE:0:U +virt_vcpu value:DERIVE:0:U +vmpage_action value:DERIVE:0:U +vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U +vmpage_io in:DERIVE:0:U, out:DERIVE:0:U +vmpage_number value:GAUGE:0:4294967295 +volatile_changes value:GAUGE:0:U +voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U +voltage value:GAUGE:U:U +vs_memory value:GAUGE:0:9223372036854775807 +vs_processes value:GAUGE:0:65535 +vs_threads value:GAUGE:0:65535 +# +# Legacy types +# (required for the v5 upgrade target) +# +arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U +arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U +arc_l2_size value:GAUGE:0:U +arc_ratio value:GAUGE:0:U +arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U +mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U +mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, 
created:COUNTER:0:U +` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md new file mode 100644 index 000000000..90de2b2b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md @@ -0,0 +1,3 @@ +collectD Client +============ +This directory contains code for generating collectd load. diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go new file mode 100644 index 000000000..790f5e871 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go @@ -0,0 +1,71 @@ +package main + +import ( + "collectd.org/api" + "collectd.org/network" + + "flag" + "fmt" + "math/rand" + "os" + "strconv" + "time" +) + +var nMeasurments = flag.Int("m", 1, "Number of measurements") +var tagVariance = flag.Int("v", 1, "Number of values per tag. 
Client is fixed at one tag") +var rate = flag.Int("r", 1, "Number of points per second") +var total = flag.Int("t", -1, "Total number of points to send (default is no limit)") +var host = flag.String("u", "127.0.0.1:25826", "Destination host in the form host:port") + +func main() { + flag.Parse() + + conn, err := network.Dial(*host, network.ClientOptions{}) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer conn.Close() + + rateLimiter := make(chan int, *rate) + + go func() { + ticker := time.NewTicker(time.Second) + for { + select { + case <-ticker.C: + for i := 0; i < *rate; i++ { + rateLimiter <- i + } + } + } + }() + + nSent := 0 + for { + if nSent >= *total && *total > 0 { + break + } + <-rateLimiter + + vl := api.ValueList{ + Identifier: api.Identifier{ + Host: "tagvalue" + strconv.Itoa(int(rand.Int31n(int32(*tagVariance)))), + Plugin: "golang" + strconv.Itoa(int(rand.Int31n(int32(*nMeasurments)))), + Type: "gauge", + }, + Time: time.Now(), + Interval: 10 * time.Second, + Values: []api.Value{api.Gauge(42.0)}, + } + if err := conn.Write(vl); err != nil { + fmt.Println(err) + os.Exit(1) + } + conn.Flush() + nSent = nSent + 1 + } + + fmt.Println("Number of points sent:", nSent) +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go new file mode 100644 index 000000000..1549a816b --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go @@ -0,0 +1,65 @@ +package continuous_querier + +import ( + "time" + + "github.com/influxdb/influxdb/toml" +) + +const ( + DefaultRecomputePreviousN = 2 + + DefaultRecomputeNoOlderThan = 10 * time.Minute + + DefaultComputeRunsPerInterval = 10 + + DefaultComputeNoMoreThan = 2 * time.Minute +) + +// Config represents a configuration for the continuous query service. 
+type Config struct { + // Enables logging in CQ service to display when CQ's are processed and how many points are wrote. + LogEnabled bool `toml:"log-enabled"` + + // If this flag is set to false, both the brokers and data nodes should ignore any CQ processing. + Enabled bool `toml:"enabled"` + + // when continuous queries are run we'll automatically recompute previous intervals + // in case lagged data came in. Set to zero if you never have lagged data. We do + // it this way because invalidating previously computed intervals would be insanely hard + // and expensive. + RecomputePreviousN int `toml:"recompute-previous-n"` + + // The RecomputePreviousN setting provides guidance for how far back to recompute, the RecomputeNoOlderThan + // setting sets a ceiling on how far back in time it will go. For example, if you have 2 PreviousN + // and have this set to 10m, then we'd only compute the previous two intervals for any + // CQs that have a group by time <= 5m. For all others, we'd only recompute the previous window + RecomputeNoOlderThan toml.Duration `toml:"recompute-no-older-than"` + + // ComputeRunsPerInterval will determine how many times the current and previous N intervals + // will be computed. The group by time will be divided by this and it will get computed this many times: + // group by time seconds / runs per interval + // This will give partial results for current group by intervals and will determine how long it will + // be until lagged data is recomputed. For example, if this number is 10 and the group by time is 10m, it + // will be a minute past the previous 10m bucket of time before lagged data is picked up + ComputeRunsPerInterval int `toml:"compute-runs-per-interval"` + + // ComputeNoMoreThan paired with the RunsPerInterval will determine the ceiling of how many times smaller + // group by times will be computed. For example, if you have RunsPerInterval set to 10 and this setting + // to 1m. 
Then for a group by time(1m) will actually only get computed once per interval (and once per PreviousN). + // If you have a group by time(5m) then you'll get five computes per interval. Any group by time window larger + // than 10m will get computed 10 times for each interval. + ComputeNoMoreThan toml.Duration `toml:"compute-no-more-than"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + LogEnabled: true, + Enabled: true, + RecomputePreviousN: DefaultRecomputePreviousN, + RecomputeNoOlderThan: toml.Duration(DefaultRecomputeNoOlderThan), + ComputeRunsPerInterval: DefaultComputeRunsPerInterval, + ComputeNoMoreThan: toml.Duration(DefaultComputeNoMoreThan), + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go new file mode 100644 index 000000000..2a0edc4f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go @@ -0,0 +1,36 @@ +package continuous_querier_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdb/influxdb/services/continuous_querier" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c continuous_querier.Config + if _, err := toml.Decode(` +recompute-previous-n = 1 +recompute-no-older-than = "10s" +compute-runs-per-interval = 2 +compute-no-more-than = "20s" +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
+	if c.RecomputePreviousN != 1 {
+		t.Fatalf("unexpected recompute previous n: %d", c.RecomputePreviousN)
+	} else if time.Duration(c.RecomputeNoOlderThan) != 10*time.Second {
+		t.Fatalf("unexpected recompute no older than: %v", c.RecomputeNoOlderThan)
+	} else if c.ComputeRunsPerInterval != 2 {
+		t.Fatalf("unexpected compute runs per interval: %d", c.ComputeRunsPerInterval)
+	} else if time.Duration(c.ComputeNoMoreThan) != 20*time.Second {
+		t.Fatalf("unexpected compute no more than: %v", c.ComputeNoMoreThan)
+	} else if c.Enabled != true {
+		t.Fatalf("unexpected enabled: %v", c.Enabled)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md
new file mode 100644
index 000000000..bd14b5161
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md
@@ -0,0 +1,236 @@
+# Continuous Queries
+
+This document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster.
+
+## Definition of Continuous Queries
+
+Continuous queries serve two purposes in InfluxDB:
+
+1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient)
+2. Aggregating and downsampling series
+
+The purpose of both types of continuous queries is to duplicate or downsample data automatically in the background to make querying their results fast and efficient. Think of them as another way to create indexes on data.
+
+Generally, there are continuous queries that create copies of data into another measurement or tagset and queries that downsample and aggregate data. The only difference between the two types is if the query has a `GROUP BY time` clause.
+
+Before we get to the continuous query examples, we need to define the `INTO` syntax of queries.
+
+### INTO
+
+`INTO` is a method for running a query and having it output into either another measurement name, retention policy, or database. The syntax looks like this:
+
+```sql
+SELECT *
+INTO [.] [ON ]
+FROM 
+[WHERE ...]
+[GROUP BY ...]
+```
+
+The syntax states that the retention policy, database, where clause, and group by clause are all optional. If a retention policy isn't specified, the database's default retention policy will be written into. If the database isn't specified, the database the query is running from will be written into.
+
+By selecting specific fields, `INTO` can merge many series into one that will go into either a new measurement, retention policy, or database. For example:
+
+```sql
+SELECT mean(value) as value, region
+INTO "1h.cpu_load"
+FROM cpu_load
+GROUP BY time(1h), region
+```
+
+That will give 1h summaries of the mean value of the `cpu_load` for each `region`. Specifying `region` in the `GROUP BY` clause is unnecessary since having it in the `SELECT` clause forces it to be grouped by that tag; we've just included it in the example for clarity.
+
+With `SELECT ... INTO`, fields will be written as fields and tags will be written as tags.
+
+### Continuous Query Syntax
+
+The `INTO` queries run once. Continuous queries will turn `INTO` queries into something that runs in the background in the cluster. They're kind of like triggers in SQL.
+ +```sql +CREATE CONTINUOUS QUERY "1h_cpu_load" +ON database_name +BEGIN + SELECT mean(value) as value, region + INTO "1h.cpu_load" + FROM cpu_load + GROUP BY time(1h), region +END +``` + +Or chain them together: + +```sql +CREATE CONTINUOUS QUERY "10m_event_count" +ON database_name +BEGIN + SELECT count(value) + INTO "10m.events" + FROM events + GROUP BY time(10m) +END + +-- this selects from the output of one continuous query and outputs to another series +CREATE CONTINUOUS QUERY "1h_event_count" +ON database_name +BEGIN + SELECT sum(count) as count + INTO "1h.events" + FROM events + GROUP BY time(1h) +END +``` + +Or multiple aggregations from all series in a measurement. This example assumes you have a retention policy named `1h`. + +```sql +CREATE CONTINUOUS QUERY "1h_cpu_load" +ON database_name +BEGIN + SELECT mean(value), percentile(80, value) as percentile_80, percentile(95, value) as percentile_95 + INTO "1h.cpu_load" + FROM cpu_load + GROUP BY time(1h), * +END +``` + +The `GROUP BY *` indicates that we want to group by the tagset of the points written in. The same tags will be written to the output series. The multiple aggregates in the `SELECT` clause (percentile, mean) will be written in as fields to the resulting series. + +Showing what continuous queries we have: + +```sql +LIST CONTINUOUS QUERIES +``` + +Dropping continuous queries: + +```sql +DROP CONTINUOUS QUERY +ON +``` + +### Security + +To create or drop a continuous query, the user must be an admin. + +### Limitations + +In order to prevent cycles and endless copying of data, the following limitation is enforced on continuous queries at create time: + +*The output of a continuous query must go to either a different measurement or to a different retention policy.* + +In theory they'd still be able to create a cycle with multiple continuous queries. We should check for these and disallow. + +## Proposed Architecture + +Continuous queries should be stored in the metastore cluster wide. 
That is, they amount to database schema that should be stored in every server in a cluster. + +Continuous queries will have to be handled in a different way for two different use cases: those that simply copy data (CQs without a group by time) and those that aggregate and downsample data (those with a group by time). + +### No group by time + +For CQs that have no `GROUP BY time` clause, they should be evaluated at the data node as part of the write. The single write should create any other writes for the CQ and submit those in the same request to the brokers to ensure that all writes succeed (both the original and the new CQ writes) or none do. + +I imagine the process going something like this: + +1. Convert the data point into its compact form `